Skip to content

Commit 08eba30

Browse files
author
Steve Vaughan Jr
committed
HDFS-16684. Exclude the current JournalNode
Exclude bound local addresses, including the use of a wildcard address in the bound host configurations.
1 parent 92abd99 commit 08eba30

File tree

3 files changed

+64
-8
lines changed

3 files changed

+64
-8
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -124,6 +124,11 @@ synchronized Journal getOrCreateJournal(String jid,
124124
return journal;
125125
}
126126

127+
@VisibleForTesting
128+
public JournalNodeSyncer getJournalSyncer(String jid) {
129+
return journalSyncersById.get(jid);
130+
}
131+
127132
@VisibleForTesting
128133
public boolean getJournalSyncerStatus(String jid) {
129134
if (journalSyncersById.get(jid) != null) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java

Lines changed: 19 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
*/
1818
package org.apache.hadoop.hdfs.qjournal.server;
1919

20+
import org.apache.hadoop.classification.VisibleForTesting;
2021
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
2122
import org.apache.hadoop.classification.InterfaceAudience;
2223
import org.apache.hadoop.conf.Configuration;
@@ -39,6 +40,7 @@
3940
import org.apache.hadoop.security.UserGroupInformation;
4041
import org.apache.hadoop.util.Daemon;
4142
import org.apache.hadoop.util.Lists;
43+
import org.apache.hadoop.util.Sets;
4244
import org.slf4j.Logger;
4345
import org.slf4j.LoggerFactory;
4446

@@ -54,6 +56,7 @@
5456
import java.util.Collection;
5557
import java.util.HashSet;
5658
import java.util.List;
59+
import java.util.Set;
5760

5861
/**
5962
* A Journal Sync thread runs through the lifetime of the JN. It periodically
@@ -153,7 +156,8 @@ private boolean getOtherJournalNodeProxies() {
153156
LOG.warn("Could not add proxy for Journal at addresss " + addr, e);
154157
}
155158
}
156-
if (otherJNProxies.isEmpty()) {
159+
// Check whether there are any resolvable JournalNodes before starting the sync.
160+
if (otherJNProxies.stream().filter(jnp -> !jnp.jnAddr.isUnresolved()).count() == 0) {
157161
LOG.error("Cannot sync as there is no other JN available for sync.");
158162
return false;
159163
}
@@ -310,12 +314,23 @@ private List<InetSocketAddress> getOtherJournalNodeAddrs() {
310314
return null;
311315
}
312316

313-
private List<InetSocketAddress> getJournalAddrList(String uriStr) throws
317+
@VisibleForTesting
318+
protected List<InetSocketAddress> getJournalAddrList(String uriStr) throws
314319
URISyntaxException,
315320
IOException {
316321
URI uri = new URI(uriStr);
317-
return Util.getLoggerAddresses(uri,
318-
new HashSet<>(Arrays.asList(jn.getBoundIpcAddress())), conf);
322+
323+
InetSocketAddress boundIpcAddress = jn.getBoundIpcAddress();
324+
Set<InetSocketAddress> excluded = Sets.newHashSet(boundIpcAddress);
325+
List<InetSocketAddress> addrList = Util.getLoggerAddresses(uri, excluded, conf);
326+
327+
// Exclude the current JournalNode instance. If we are bound to a local address on the same
328+
// then exclude any port, then remove it from the list since it likely a wildcard address
329+
// (e.g. "0.0.0.0").
330+
addrList.removeIf(addr -> !addr.isUnresolved() && addr.getAddress().isAnyLocalAddress()
331+
&& boundIpcAddress.getPort() == addr.getPort());
332+
333+
return addrList;
319334
}
320335

321336
private void getMissingLogSegments(List<RemoteEditLog> thisJournalEditLogs,

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java

Lines changed: 40 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,8 @@
1717
*/
1818
package org.apache.hadoop.hdfs.qjournal.server;
1919

20+
import java.net.InetSocketAddress;
21+
import java.net.URISyntaxException;
2022
import java.util.function.Supplier;
2123
import org.apache.hadoop.conf.Configuration;
2224
import org.apache.hadoop.fs.Path;
@@ -34,6 +36,7 @@
3436
import static org.apache.hadoop.hdfs.server.namenode.FileJournalManager
3537
.getLogFile;
3638
import static org.assertj.core.api.Assertions.assertThat;
39+
import static org.junit.Assert.assertEquals;
3740

3841
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
3942
import org.apache.hadoop.test.GenericTestUtils;
@@ -96,12 +99,45 @@ public void shutDownMiniCluster() throws IOException {
9699
}
97100
}
98101

102+
/**
103+
* Test that the "self exclusion" works when there are multiple JournalNode instances running on
104+
* the same server, but on different ports.
105+
*/
106+
@Test
107+
public void testJournalNodeExcludesSelfMultilpePorts() throws URISyntaxException, IOException {
108+
String uri = qjmhaCluster.getJournalCluster().getQuorumJournalURI("ns1").toString();
109+
JournalNodeSyncer syncer = jCluster.getJournalNode(0).getJournalSyncer("ns1");
110+
111+
// Test: Get the Journal address list for the default configuration
112+
List<InetSocketAddress> addrList = syncer.getJournalAddrList(uri);
113+
114+
// Verify: One of the addresses should be excluded so that the node isn't syncing with itself
115+
assertEquals(2, addrList.size());
116+
}
117+
118+
/**
119+
* Test that the "self exclusion" works when there a host uses a wildcard address.
120+
*/
121+
@Test
122+
public void testJournalNodeExcludesSelfWildCard() throws URISyntaxException, IOException {
123+
String uri = qjmhaCluster.getJournalCluster().getQuorumJournalURI("ns1").toString();
124+
JournalNodeSyncer syncer = jCluster.getJournalNode(0).getJournalSyncer("ns1");
125+
126+
// Test: Request the same Journal address list, but using the IPv4 "0.0.0.0" which is commonly
127+
// used as a bind host.
128+
String boundHostUri = uri.replaceAll("127.0.0.1", "0.0.0.0");
129+
List<InetSocketAddress> boundHostAddrList = syncer.getJournalAddrList(boundHostUri);
130+
131+
// Verify: One of the address should be excluded so that the node isn't syncing with itself
132+
assertEquals(2, boundHostAddrList.size());
133+
}
134+
99135
@Test(timeout=30000)
100136
public void testJournalNodeSync() throws Exception {
101137

102138
//As by default 3 journal nodes are started;
103139
for(int i=0; i<3; i++) {
104-
Assert.assertEquals(true,
140+
assertEquals(true,
105141
jCluster.getJournalNode(i).getJournalSyncerStatus("ns1"));
106142
}
107143

@@ -386,13 +422,13 @@ public void testSyncDuringRollingUpgrade() throws Exception {
386422
HdfsConstants.RollingUpgradeAction.PREPARE);
387423

388424
//query rolling upgrade
389-
Assert.assertEquals(info, dfsActive.rollingUpgrade(
425+
assertEquals(info, dfsActive.rollingUpgrade(
390426
HdfsConstants.RollingUpgradeAction.QUERY));
391427

392428
// Restart the Standby NN with rollingUpgrade option
393429
dfsCluster.restartNameNode(standbyNNindex, true,
394430
"-rollingUpgrade", "started");
395-
Assert.assertEquals(info, dfsActive.rollingUpgrade(
431+
assertEquals(info, dfsActive.rollingUpgrade(
396432
HdfsConstants.RollingUpgradeAction.QUERY));
397433

398434
// Do some edits and delete some edit logs
@@ -420,7 +456,7 @@ public void testSyncDuringRollingUpgrade() throws Exception {
420456
// Restart the current standby NN (previously active)
421457
dfsCluster.restartNameNode(standbyNNindex, true,
422458
"-rollingUpgrade", "started");
423-
Assert.assertEquals(info, dfsActive.rollingUpgrade(
459+
assertEquals(info, dfsActive.rollingUpgrade(
424460
HdfsConstants.RollingUpgradeAction.QUERY));
425461
dfsCluster.waitActive();
426462

0 commit comments

Comments
 (0)