Skip to content

Commit ff69bce

Browse files
committed
reset TestDataNodeMetrics
1 parent 59b6ca6 commit ff69bce

File tree

1 file changed

+24
-33
lines changed

1 file changed

+24
-33
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java

Lines changed: 24 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -88,9 +88,9 @@ public void testDataNodeMetrics() throws Exception {
88 88
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
89 89
try {
90 90
FileSystem fs = cluster.getFileSystem();
91 -
final long LONG_FILE_LEN = Integer.MAX_VALUE + 1L;
91 +
final long LONG_FILE_LEN = Integer.MAX_VALUE+1L;
92 92
DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
93 -
LONG_FILE_LEN, (short) 1, 1L);
93 +
LONG_FILE_LEN, (short)1, 1L);
94 94
List<DataNode> datanodes = cluster.getDataNodes();
95 95
assertEquals(datanodes.size(), 1);
96 96
DataNode datanode = datanodes.get(0);
@@ -99,9 +99,7 @@ public void testDataNodeMetrics() throws Exception {
99 99
assertTrue("Expected non-zero number of incremental block reports",
100 100
getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
101 101
} finally {
102 -
if (cluster != null) {
103 -
cluster.shutdown();
104 -
}
102 +
if (cluster != null) {cluster.shutdown();}
105 103
}
106 104
}
107 105

@@ -116,26 +114,24 @@ public void testSendDataPacketMetrics() throws Exception {
116 114
// Create and read a 1 byte file
117 115
Path tmpfile = new Path("/tmp.txt");
118 116
DFSTestUtil.createFile(fs, tmpfile,
119 -
(long) 1, (short) 1, 1L);
117 +
(long)1, (short)1, 1L);
120 118
DFSTestUtil.readFile(fs, tmpfile);
121 119
List<DataNode> datanodes = cluster.getDataNodes();
122 120
assertEquals(datanodes.size(), 1);
123 121
DataNode datanode = datanodes.get(0);
124 122
MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
125 123
// Expect 2 packets, 1 for the 1 byte read, 1 for the empty packet
126 124
// signaling the end of the block
127 -
assertCounter("SendDataPacketTransferNanosNumOps", (long) 2, rb);
128 -
assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps", (long) 2, rb);
125 +
assertCounter("SendDataPacketTransferNanosNumOps", (long)2, rb);
126 +
assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps", (long)2, rb);
129 127
// Wait for at least 1 rollover
130 128
Thread.sleep((interval + 1) * 1000);
131 129
// Check that the sendPacket percentiles rolled to non-zero values
132 130
String sec = interval + "s";
133 131
assertQuantileGauges("SendDataPacketBlockedOnNetworkNanos" + sec, rb);
134 132
assertQuantileGauges("SendDataPacketTransferNanos" + sec, rb);
135 133
} finally {
136 -
if (cluster != null) {
137 -
cluster.shutdown();
138 -
}
134 +
if (cluster != null) {cluster.shutdown();}
139 135
}
140 136
}
141 137

@@ -169,9 +165,7 @@ public void testReceivePacketMetrics() throws Exception {
169 165
assertQuantileGauges("FlushNanos" + sec, dnMetrics);
170 166
assertQuantileGauges("FsyncNanos" + sec, dnMetrics);
171 167
} finally {
172 -
if (cluster != null) {
173 -
cluster.shutdown();
174 -
}
168 +
if (cluster != null) {cluster.shutdown();}
175 169
}
176 170
}
177 171

@@ -276,8 +270,8 @@ public void testFsDatasetMetrics() throws Exception {
276 270
}
277 271

278 272
/**
279 -
* Tests that round-trip acks in a datanode write pipeline are correctly
280 -
* measured.
273 +
* Tests that round-trip acks in a datanode write pipeline are correctly
274 +
* measured.
281 275
*/
282 276
@Test
283 277
public void testRoundTripAckMetric() throws Exception {
@@ -315,14 +309,14 @@ public void testRoundTripAckMetric() throws Exception {
315 309
break;
316 310
}
317 311
}
318 -
assertNotNull("Could not find the head of the datanode write pipeline",
312 +
assertNotNull("Could not find the head of the datanode write pipeline",
319 313
headNode);
320 314
// Close the file and wait for the metrics to rollover
321 315
Thread.sleep((interval + 1) * 1000);
322 316
// Check the ack was received
323 317
MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics()
324 318
.name());
325 -
assertTrue("Expected non-zero number of acks",
319 +
assertTrue("Expected non-zero number of acks",
326 320
getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
327 321
assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval
328 322
+ "s", dnMetrics);
@@ -333,7 +327,7 @@ public void testRoundTripAckMetric() throws Exception {
333 327
}
334 328
}
335 329

336 -
@Test(timeout = 60000)
330 +
@Test(timeout=60000)
337 331
public void testTimeoutMetric() throws Exception {
338 332
final Configuration conf = new HdfsConfiguration();
339 333
final Path path = new Path("/test");
@@ -387,7 +381,7 @@ public void testTimeoutMetric() throws Exception {
387 381
*
388 382
* @throws Exception
389 383
*/
390 -
@Test(timeout = 120000)
384 +
@Test(timeout=120000)
391 385
public void testDataNodeTimeSpend() throws Exception {
392 386
Configuration conf = new HdfsConfiguration();
393 387
conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + 60);
@@ -477,16 +471,16 @@ public void testDatanodeActiveXceiversCount() throws Exception {
477 471

478 472
MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
479 473
long dataNodeActiveXceiversCount = MetricsAsserts.getIntGauge(
480 -
"DataNodeActiveXceiversCount", rb);
474 +
"DataNodeActiveXceiversCount", rb);
481 475
assertEquals(dataNodeActiveXceiversCount, 0);
482 476

483 477
Path path = new Path("/counter.txt");
484 478
DFSTestUtil.createFile(fs, path, 204800000, (short) 3, Time
485 -
.monotonicNow());
479 +
.monotonicNow());
486 480

487 481
MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
488 482
dataNodeActiveXceiversCount = MetricsAsserts.getIntGauge(
489 -
"DataNodeActiveXceiversCount", rbNew);
483 +
"DataNodeActiveXceiversCount", rbNew);
490 484
assertTrue(dataNodeActiveXceiversCount >= 0);
491 485
} finally {
492 486
if (cluster != null) {
@@ -573,12 +567,12 @@ public void testDNShouldNotDeleteBlockONTooManyOpenFiles()
573 567
fs.getClient().getLocatedBlocks(p.toString(), 0).get(0).getBlock();
574 568
try {
575 569
new BlockSender(b, 0, -1, false, true, true,
576 -
cluster.getDataNodes().get(0), null,
577 -
CachingStrategy.newDefaultStrategy());
570 +
cluster.getDataNodes().get(0), null,
571 +
CachingStrategy.newDefaultStrategy());
578 572
fail("Must throw FileNotFoundException");
579 573
} catch (FileNotFoundException fe) {
580 574
assertTrue("Should throw too many open files",
581 -
fe.getMessage().contains("Too many open files"));
575 +
fe.getMessage().contains("Too many open files"));
582 576
}
583 577
cluster.triggerHeartbeats(); // IBR delete ack
584 578
//After DN throws too many open files
@@ -593,7 +587,7 @@ public void testDNShouldNotDeleteBlockONTooManyOpenFiles()
593 587
}
594 588

595 589
private void verifyBlockLocations(DistributedFileSystem fs, Path p,
596 -
int expected) throws IOException, TimeoutException, InterruptedException {
590 +
int expected) throws IOException, TimeoutException, InterruptedException {
597 591
final LocatedBlock lb =
598 592
fs.getClient().getLocatedBlocks(p.toString(), 0).get(0);
599 593
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -621,8 +615,7 @@ public void testNNRpcMetricsWithNonHA() throws IOException {
621 615
@Test(timeout = 60000)
622 616
public void testSlowMetrics() throws Exception {
623 617
DataNodeFaultInjector dnFaultInjector = new DataNodeFaultInjector() {
624 -
@Override
625 -
public void delay() {
618 +
@Override public void delay() {
626 619
try {
627 620
Thread.sleep(310);
628 621
} catch (InterruptedException e) {
@@ -649,8 +642,7 @@ public void delay() {
649 642
final AtomicInteger x = new AtomicInteger(0);
650 643

651 644
GenericTestUtils.waitFor(new Supplier<Boolean>() {
652 -
@Override
653 -
public Boolean get() {
645 +
@Override public Boolean get() {
654 646
x.getAndIncrement();
655 647
try {
656 648
DFSTestUtil
@@ -745,7 +737,7 @@ public void testNodeLocalMetrics() throws Exception {
745 737
cluster.waitActive();
746 738
FileSystem fs = cluster.getFileSystem();
747 739
Path testFile = new Path("/testNodeLocalMetrics.txt");
748 -
DFSTestUtil.createFile(fs, testFile, 10L, (short) 1, 1L);
740 +
DFSTestUtil.createFile(fs, testFile, 10L, (short)1, 1L);
749 741
DFSTestUtil.readFile(fs, testFile);
750 742
List<DataNode> datanodes = cluster.getDataNodes();
751 743
assertEquals(1, datanodes.size());
@@ -827,4 +819,3 @@ public Boolean get() {
827 819
}
828 820
}
829 821
}
830 -

0 commit comments

Comments (0)