
Commit 586defe (parent: df6b316)

HDFS-14979 Allow Balancer to submit getBlocks calls to Observer Nodes when possible. Contributed by Erik Krogen.

2 files changed: 23 additions (+), 0 deletions (-)


hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java

Lines changed: 2 additions & 0 deletions
@@ -25,6 +25,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly;
 import org.apache.hadoop.io.retry.AtMostOnce;
 import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;

@@ -78,6 +79,7 @@ public interface NamenodeProtocol {
    *           datanode does not exist
    */
   @Idempotent
+  @ReadOnly
   BlocksWithLocations getBlocks(DatanodeInfo datanode, long size, long
       minBlockSize) throws IOException;
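
Note on how the @ReadOnly marker is consumed (not part of this commit): the client-side ObserverReadProxyProvider checks protocol methods for this annotation to decide whether a call may be served by an Observer NameNode. The sketch below is a hypothetical illustration of that reflective check; only ReadOnly, NamenodeProtocol, DatanodeInfo, and getBlocks are real Hadoop identifiers, while ReadOnlyCheck and isReadOnly are invented for the example.

// Hypothetical sketch of detecting the @ReadOnly marker added above.
import java.lang.reflect.Method;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

public class ReadOnlyCheck {

  /** True if the given protocol method is annotated with @ReadOnly. */
  static boolean isReadOnly(Method method) {
    return method.isAnnotationPresent(ReadOnly.class);
  }

  public static void main(String[] args) throws Exception {
    Method getBlocks = NamenodeProtocol.class.getMethod(
        "getBlocks", DatanodeInfo.class, long.class, long.class);
    // With this patch applied, the check returns true, so a proxy that routes
    // read-only calls (such as ObserverReadProxyProvider) is free to send
    // getBlocks to an Observer NameNode instead of the Active.
    System.out.println("getBlocks is read-only: " + isReadOnly(getBlocks));
  }
}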

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java

Lines changed: 21 additions & 0 deletions
@@ -18,8 +18,15 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;

@@ -33,6 +40,8 @@
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;
 import org.junit.Test;

@@ -128,12 +137,24 @@ public void testBalancerWithObserver() throws Exception {
       cluster = qjmhaCluster.getDfsCluster();
       cluster.waitClusterUp();
       cluster.waitActive();
+      List<FSNamesystem> namesystemSpies = new ArrayList<>();
+      for (int i = 0; i < cluster.getNumNameNodes(); i++) {
+        namesystemSpies.add(
+            NameNodeAdapter.spyOnNamesystem(cluster.getNameNode(i)));
+      }
 
       DistributedFileSystem dfs = HATestUtil.configureObserverReadFs(
           cluster, conf, ObserverReadProxyProvider.class, true);
       client = dfs.getClient().getNamenode();
 
       doTest(conf);
+      for (int i = 0; i < cluster.getNumNameNodes(); i++) {
+        // First observer node is at idx 2 so it should get both getBlocks calls
+        // all other NameNodes should see 0 getBlocks calls
+        int expectedCount = (i == 2) ? 2 : 0;
+        verify(namesystemSpies.get(i), times(expectedCount))
+            .getBlocks(any(), anyLong(), anyLong());
+      }
     } finally {
       if (qjmhaCluster != null) {
         qjmhaCluster.shutdown();
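
For readers unfamiliar with the verification pattern used above: NameNodeAdapter.spyOnNamesystem wraps each NameNode's FSNamesystem in a Mockito spy, and after the balancer run the test asserts that only the first Observer (index 2 in the cluster) received the two getBlocks calls while every other NameNode received none. A self-contained sketch of the same spy-and-count pattern, using a hypothetical BlockSource class instead of HDFS types, looks like this:

// Standalone Mockito sketch of the spy-and-verify counting pattern.
// BlockSource is a hypothetical stand-in; requires only mockito-core.
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

public class SpyCountExample {

  /** Hypothetical stand-in for the read-only RPC being counted. */
  public static class BlockSource {
    public long getBlocks(long size, long minBlockSize) {
      return size - minBlockSize;
    }
  }

  public static void main(String[] args) {
    // The spy delegates to the real object but records every invocation.
    BlockSource spied = spy(new BlockSource());

    // Simulated workload: two getBlocks calls land on this instance.
    spied.getBlocks(100L, 10L);
    spied.getBlocks(200L, 10L);

    // Passes: exactly two getBlocks invocations were recorded; times(0)
    // would be used for instances that should never have been called.
    verify(spied, times(2)).getBlocks(anyLong(), anyLong());
    System.out.println("verified 2 getBlocks calls");
  }
}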
