|
22 | 22 | import static org.junit.Assert.fail; |
23 | 23 |
|
24 | 24 | import java.io.IOException; |
| 25 | +import java.util.Collections; |
25 | 26 | import java.util.concurrent.TimeUnit; |
26 | 27 | import java.util.concurrent.TimeoutException; |
27 | 28 | import java.util.concurrent.atomic.AtomicInteger; |
|
33 | 34 | import org.apache.hadoop.fs.permission.FsPermission; |
34 | 35 | import org.apache.hadoop.hdfs.DistributedFileSystem; |
35 | 36 | import org.apache.hadoop.hdfs.MiniDFSCluster; |
| 37 | +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; |
36 | 38 | import org.apache.hadoop.hdfs.protocol.HdfsConstants; |
37 | 39 | import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; |
38 | 40 | import org.apache.hadoop.hdfs.server.namenode.NameNode; |
39 | 41 | import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; |
| 42 | +import org.apache.hadoop.ipc.RemoteException; |
40 | 43 | import org.apache.hadoop.ipc.RpcScheduler; |
41 | 44 | import org.apache.hadoop.ipc.Schedulable; |
| 45 | +import org.apache.hadoop.ipc.StandbyException; |
42 | 46 | import org.apache.hadoop.test.GenericTestUtils; |
43 | 47 | import org.apache.hadoop.util.Time; |
44 | 48 | import org.junit.After; |
@@ -344,6 +348,39 @@ public void testUncoordinatedCall() throws Exception { |
344 | 348 | reader.interrupt(); |
345 | 349 | } |
346 | 350 |
|
| 351 | + @Test |
| 352 | + public void testRequestFromNonObserverProxyProvider() throws Exception { |
| 353 | +    // Create another HDFS client using ConfiguredFailoverProxyProvider |
| 354 | + Configuration conf2 = new Configuration(conf); |
| 355 | + |
| 356 | + // Populate the above configuration with only a single observer in the |
| 357 | +    // namenode list. Also reduce retries to make the test finish faster. |
| 358 | + HATestUtil.setFailoverConfigurations( |
| 359 | + conf2, |
| 360 | + HATestUtil.getLogicalHostname(dfsCluster), |
| 361 | + Collections.singletonList( |
| 362 | + dfsCluster.getNameNode(2).getNameNodeAddress()), |
| 363 | + ConfiguredFailoverProxyProvider.class); |
| 364 | +    conf2.setBoolean("fs.hdfs.impl.disable.cache", true); // bypass FS cache |
| 365 | + conf2.setInt(HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY, 1); |
| 366 | + conf2.setInt(HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY, 1); |
| 367 | + FileSystem dfs2 = FileSystem.get(conf2); |
| 368 | + |
| 369 | + dfs.mkdir(testPath, FsPermission.getDefault()); |
| 370 | +    dfsCluster.rollEditLogAndTail(0); // let the observer catch up to the active |
| 371 | + |
| 372 | + try { |
| 373 | +      // Request should be rejected by the observer and throw StandbyException |
| 374 | + dfs2.listStatus(testPath); |
| 375 | +      fail("listStatus should have thrown an exception"); |
| 376 | + } catch (RemoteException re) { |
| 377 | + IOException e = re.unwrapRemoteException(); |
| 378 | + assertTrue("should have thrown StandbyException but got " |
| 379 | + + e.getClass().getSimpleName(), |
| 380 | + e instanceof StandbyException); |
| 381 | + } |
| 382 | + } |
| 383 | + |
347 | 384 | private void assertSentTo(int nnIdx) throws IOException { |
348 | 385 | assertTrue("Request was not sent to the expected namenode " + nnIdx, |
349 | 386 | HATestUtil.isSentToAnyOfNameNodes(dfs, dfsCluster, nnIdx)); |
|
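Note: for contrast, a client built the same way but with ObserverReadProxyProvider (the coordinated-read provider in this same package) should be served by the observer rather than rejected. A minimal sketch reusing the fixture above; the provider class is real, but listing all three namenodes (so the provider can msync against the active before reading from the observer) is an assumption for illustration, not part of this change:

    // Sketch only: the coordinated-read counterpart to the test above.
    // Assumes namenodes 0/1 are active/standby and 2 is the observer.
    Configuration conf3 = new Configuration(conf);
    HATestUtil.setFailoverConfigurations(
        conf3,
        HATestUtil.getLogicalHostname(dfsCluster),
        java.util.Arrays.asList(
            dfsCluster.getNameNode(0).getNameNodeAddress(),
            dfsCluster.getNameNode(1).getNameNodeAddress(),
            dfsCluster.getNameNode(2).getNameNodeAddress()),
        ObserverReadProxyProvider.class);
    conf3.setBoolean("fs.hdfs.impl.disable.cache", true); // fresh client instance
    FileSystem dfs3 = FileSystem.get(conf3);
    dfs3.listStatus(testPath); // expected to succeed, served by the observer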