Skip to content

Commit 1691ccc

Browse files
authored
HDFS-16738. Invalid CallerContext caused NullPointerException (#4791)
1 parent 880686d commit 1691ccc

File tree

2 files changed: +74 −4 lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
Lines changed: 7 additions & 4 deletions

@@ -541,18 +541,21 @@ public static String parseSpecialValue(String content, String key) {
    * @return The actual client's machine.
    */
   public static String getClientMachine(final String[] ipProxyUsers) {
+    String clientMachine = null;
     String cc = clientInfoFromContext(ipProxyUsers);
     if (cc != null) {
       // if the rpc has a caller context of "clientIp:1.2.3.4,CLI",
       // return "1.2.3.4" as the client machine.
       String key = CallerContext.CLIENT_IP_STR +
           CallerContext.Builder.KEY_VALUE_SEPARATOR;
-      return parseSpecialValue(cc, key);
+      clientMachine = parseSpecialValue(cc, key);
     }
 
-    String clientMachine = Server.getRemoteAddress();
-    if (clientMachine == null) { //not a RPC client
-      clientMachine = "";
+    if (clientMachine == null) {
+      clientMachine = Server.getRemoteAddress();
+      if (clientMachine == null) { //not a RPC client
+        clientMachine = "";
+      }
     }
     return clientMachine;
   }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,22 +28,29 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
 
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
+import org.junit.jupiter.api.Timeout;
 
 public class TestNameNodeRpcServer {

@@ -91,6 +98,66 @@ private static String getPreferredLocation(DistributedFileSystem fs,
   // trials. 1/3^20=3e-10, so that should be good enough.
   static final int ITERATIONS_TO_USE = 20;
 
+  @Test
+  @Timeout(30000)
+  public void testNamenodeRpcClientIpProxyWithFailBack() throws Exception {
+    // Make 3 nodes & racks so that we have a decent shot of detecting when
+    // our change overrides the random choice of datanode.
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFS_NAMENODE_IP_PROXY_USERS, "fake_joe");
+    final CallerContext original = CallerContext.getCurrent();
+
+    MiniQJMHACluster qjmhaCluster = null;
+    try {
+      String baseDir = GenericTestUtils.getRandomizedTempPath();
+      MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
+      builder.getDfsBuilder().numDataNodes(3);
+      qjmhaCluster = builder.baseDir(baseDir).build();
+      MiniDFSCluster dfsCluster = qjmhaCluster.getDfsCluster();
+      dfsCluster.waitActive();
+      dfsCluster.transitionToActive(0);
+
+      // Set the caller context to set the ip address
+      CallerContext.setCurrent(
+          new CallerContext.Builder("test", conf)
+              .build());
+
+      dfsCluster.getFileSystem(0).setPermission(
+          new Path("/"), FsPermission.getDirDefault());
+
+      // Run as fake joe to authorize the test
+      UserGroupInformation joe =
+          UserGroupInformation.createUserForTesting("fake_joe",
+              new String[]{"fake_group"});
+
+      FileSystem joeFs = joe.doAs((PrivilegedExceptionAction<FileSystem>) () ->
+          FileSystem.get(dfsCluster.getURI(0), conf));
+
+      Path testPath = new Path("/foo");
+      // Write a sample file
+      FSDataOutputStream stream = joeFs.create(testPath);
+      stream.write("Hello world!\n".getBytes(StandardCharsets.UTF_8));
+      stream.close();
+
+      qjmhaCluster.getDfsCluster().transitionToStandby(0);
+      qjmhaCluster.getDfsCluster().transitionToActive(1);
+
+      DistributedFileSystem nn1 = dfsCluster.getFileSystem(1);
+      assertNotNull(nn1.getFileStatus(testPath));
+    } finally {
+      CallerContext.setCurrent(original);
+      if (qjmhaCluster != null) {
+        try {
+          qjmhaCluster.shutdown();
+        } catch (IOException e) {
+          e.printStackTrace();
+        }
+      }
+      // Reset the config
+      conf.unset(DFS_NAMENODE_IP_PROXY_USERS);
+    }
+  }
+
   /**
    * A test to make sure that if an authorized user adds "clientIp:" to their
    * caller context, it will be used to make locality decisions on the NN.

0 commit comments

Comments
 (0)