Commit f216d13

Expand Assertions.*
1 parent: bd67d1f

6 files changed: 55 additions (+), 36 deletions (-)
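Every file in this change follows the same two-step pattern: the wildcard static import of JUnit 5's Assertions is expanded into explicit per-method imports, and calls still written as Assertions.xxx(...) drop the qualifier. A minimal before/after sketch of the pattern, shown as two versions of the same hypothetical test class (FooTest and its assertion are illustrative, not taken from this commit):

// Before: wildcard static import, plus a qualified Assertions call.
import static org.junit.jupiter.api.Assertions.*;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

class FooTest {
  @Test
  void addsTwoNumbers() {
    Assertions.assertEquals(4, 2 + 2);
  }
}

// After: only the assertion methods actually used are imported, unqualified.
import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

class FooTest {
  @Test
  void addsTwoNumbers() {
    assertEquals(4, 2 + 2);
  }
}

As the TestDFSShell and TestErasureCodingPolicies diffs below show, dropping the Assertions. qualifier also lets those files remove their import of org.junit.jupiter.api.Assertions entirely.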

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java

Lines changed: 5 additions & 3 deletions
@@ -19,7 +19,10 @@
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
-import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.File;
 import java.io.IOException;

@@ -80,8 +83,7 @@ void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
       FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
       break;
     case DATA_NODE:
-      assertEquals(
-          UpgradeUtilities.checksumContents(nodeType, curDir, false),
+      assertEquals(UpgradeUtilities.checksumContents(nodeType, curDir, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java

Lines changed: 28 additions & 23 deletions
@@ -72,7 +72,6 @@
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Assertions;
 import org.slf4j.event.Level;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;

@@ -81,8 +80,14 @@
 import static org.apache.hadoop.fs.permission.AclEntryType.*;
 import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.jupiter.api.Assertions.*;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * This class tests commands from DFSShell.

@@ -561,10 +566,10 @@ public void testPut() throws IOException {
     try (FSDataOutputStream out = dfs.create(dst, false)) {
       // It should fail to create a new client writing to the same file.
       try(DFSClient client = new DFSClient(dfs.getUri(), dfs.getConf())) {
-        final RemoteException e = Assertions.assertThrows(RemoteException.class,
+        final RemoteException e = assertThrows(RemoteException.class,
            () -> client.create(dst.toString(), false));
        LOG.info("GOOD", e);
-        Assertions.assertEquals(e.getClassName(), AlreadyBeingCreatedException.class.getName());
+        assertEquals(e.getClassName(), AlreadyBeingCreatedException.class.getName());
       }
       // It should succeed to continue writing to the file.
       out.writeUTF(hello);

@@ -1121,7 +1126,7 @@ public void testChecksum() throws Exception {
       assertTrue(out.toString().contains(StringUtils.byteToHexString(checksum.getBytes(),
          0, checksum.getLength())));
     } finally {
-      Assertions.assertNotNull(printStream);
+      assertNotNull(printStream);
       System.setOut(printStream);
     }
   }

@@ -1964,22 +1969,22 @@ public void testGet() throws IOException {
       public String run(int exitcode, String... options) throws IOException {
         String dst = new File(TEST_ROOT_DIR, fname + ++count)
            .getAbsolutePath();
-        String[] args = new String[options.length + 3];
-        args[0] = "-get";
-        args[args.length - 2] = remotef.toString();
-        args[args.length - 1] = dst;
-        for(int i = 0; i < options.length; i++) {
-          args[i + 1] = options[i];
-        }
-        show("args=" + Arrays.asList(args));
-
-        try {
-          assertEquals(exitcode, shell.run(args));
-        } catch (Exception e) {
+        String[] args = new String[options.length + 3];
+        args[0] = "-get";
+        args[args.length - 2] = remotef.toString();
+        args[args.length - 1] = dst;
+        for (int i = 0; i < options.length; i++) {
+          args[i + 1] = options[i];
+        }
+        show("args=" + Arrays.asList(args));
+
+        try {
+          assertEquals(exitcode, shell.run(args));
+        } catch (Exception e) {
           assertTrue(false, StringUtils.stringifyException(e));
-        }
-        return exitcode == 0? DFSTestUtil.readFile(new File(dst)): null;
-      }
+        }
+        return exitcode == 0 ? DFSTestUtil.readFile(new File(dst)) : null;
+      }
      };
 
      File localf = createLocalFile(new File(TEST_ROOT_DIR, fname));

@@ -1988,7 +1993,7 @@ public String run(int exitcode, String... options) throws IOException {
 
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true)
-          .build();
+          .build();
      dfs = cluster.getFileSystem();
 
      mkdir(dfs, root);

@@ -2017,13 +2022,13 @@ public String run(int exitcode, String... options) throws IOException {
 
      // Start the miniCluster again, but do not reformat, so prior files remain.
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false)
-          .build();
+          .build();
      dfs = cluster.getFileSystem();
 
      assertEquals(null, runner.run(1));
      String corruptedcontent = runner.run(0, "-ignoreCrc");
      assertEquals(localfcontent.substring(1), corruptedcontent.substring(1));
-      assertEquals(localfcontent.charAt(0)+1, corruptedcontent.charAt(0));
+      assertEquals(localfcontent.charAt(0) + 1, corruptedcontent.charAt(0));
    } finally {
      if (null != dfs) {
        try {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java

Lines changed: 4 additions & 1 deletion
@@ -50,7 +50,10 @@
 import org.apache.log4j.Logger;
 import org.junit.jupiter.api.Test;
 
-import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * This tests data transfer protocol handling in the Datanode. It sends

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java

Lines changed: 6 additions & 1 deletion
@@ -58,7 +58,12 @@
 import static org.apache.hadoop.fs.permission.FsAction.NONE;
 import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Test after enable Erasure Coding on cluster, exercise Java API make sure they

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java

Lines changed: 11 additions & 7 deletions
@@ -42,7 +42,6 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 

@@ -56,7 +55,12 @@
 import java.util.Map;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import org.junit.jupiter.api.Timeout;
 
 @Timeout(60)

@@ -224,10 +228,10 @@ public void testBasicSetECPolicy()
 
     // Already set directory-level policies should still be in effect
     Path disabledPolicy = new Path(dir1, "afterDisabled");
-    Assertions.assertEquals(ecPolicy, fs.getErasureCodingPolicy(dir1),
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(dir1),
        "Dir does not have policy set");
    fs.create(disabledPolicy).close();
-    Assertions.assertEquals(ecPolicy, fs.getErasureCodingPolicy(disabledPolicy),
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(disabledPolicy),
        "File did not inherit dir's policy");
 
    // Also check loading disabled EC policies from fsimage

@@ -236,9 +240,9 @@ public void testBasicSetECPolicy()
    fs.setSafeMode(SafeModeAction.LEAVE);
    cluster.restartNameNodes();
 
-    Assertions.assertEquals(ecPolicy, fs.getErasureCodingPolicy(dir1),
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(dir1),
        "Dir does not have policy set");
-    Assertions.assertEquals(ecPolicy, fs.getErasureCodingPolicy(disabledPolicy),
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(disabledPolicy),
        "File does not have policy set");
  }
 

@@ -758,7 +762,7 @@ public void testAddErasureCodingPolicies() throws Exception {
    for (int cellSize: cellSizes) {
      try {
        new ErasureCodingPolicy(toAddSchema, cellSize);
-        Assertions.fail("Invalid cell size should be detected.");
+        fail("Invalid cell size should be detected.");
      } catch (Exception e){
        GenericTestUtils.assertExceptionContains("cellSize must be", e);
      }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@
 import org.apache.hadoop.thirdparty.protobuf.ByteString;
 
 import org.junit.jupiter.api.Test;
-import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify compatible FileStatus/HdfsFileStatus serialization.
