
Commit f841715

Merge branch 'trunk' into YARN-11158-V3

2 parents: c025e79 + e09e81a
File tree: 76 files changed, +1009 −596 lines

Large commits hide some content by default; only a subset of the 76 changed files appears below.

LICENSE-binary

Lines changed: 1 addition & 1 deletion

```diff
@@ -244,7 +244,7 @@ com.nimbusds:nimbus-jose-jwt:9.8.1
 com.squareup.okhttp3:okhttp:4.10.0
 com.squareup.okio:okio:3.2.0
 com.zaxxer:HikariCP:4.0.3
-commons-beanutils:commons-beanutils:1.9.3
+commons-beanutils:commons-beanutils:1.9.4
 commons-cli:commons-cli:1.2
 commons-codec:commons-codec:1.11
 commons-collections:commons-collections:3.2.2
```

hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md

Lines changed: 10 additions & 7 deletions

```diff
@@ -59,7 +59,7 @@ Copies source paths to stdout.
 
 Options
 
-* The `-ignoreCrc` option disables checkshum verification.
+* The `-ignoreCrc` option disables checksum verification.
 
 Example:
 
@@ -73,18 +73,19 @@ Returns 0 on success and -1 on error.
 checksum
 --------
 
-Usage: `hadoop fs -checksum [-v] URI`
+Usage: `hadoop fs -checksum [-v] URI [URI ...]`
 
-Returns the checksum information of a file.
+Returns the checksum information of the file(s).
 
 Options
 
-* The `-v` option displays blocks size for the file.
+* The `-v` option displays blocks size for the file(s).
 
 Example:
 
 * `hadoop fs -checksum hdfs://nn1.example.com/file1`
 * `hadoop fs -checksum file:///etc/hosts`
+* `hadoop fs -checksum file:///etc/hosts hdfs://nn1.example.com/file1`
 
 chgrp
 -----
@@ -177,7 +178,7 @@ Returns 0 on success and -1 on error.
 cp
 ----
 
-Usage: `hadoop fs -cp [-f] [-p | -p[topax]] [-t <thread count>] [-q <thread pool queue size>] URI [URI ...] <dest>`
+Usage: `hadoop fs -cp [-f] [-p | -p[topax]] [-d] [-t <thread count>] [-q <thread pool queue size>] URI [URI ...] <dest>`
 
 Copy files from source to destination. This command allows multiple sources as well in which case the destination must be a directory.
 
@@ -187,13 +188,14 @@ Options:
 
 * `-f` : Overwrite the destination if it already exists.
 * `-d` : Skip creation of temporary file with the suffix `._COPYING_`.
-* `-p` : Preserve file attributes [topx] (timestamps, ownership, permission, ACL, XAttr). If -p is specified with no *arg*, then preserves timestamps, ownership, permission. If -pa is specified, then preserves permission also because ACL is a super-set of permission. Determination of whether raw namespace extended attributes are preserved is independent of the -p flag.
+* `-p` : Preserve file attributes [topax] (timestamps, ownership, permission, ACL, XAttr). If -p is specified with no *arg*, then preserves timestamps, ownership, permission. If -pa is specified, then preserves permission also because ACL is a super-set of permission. Determination of whether raw namespace extended attributes are preserved is independent of the -p flag.
 * `-t <thread count>` : Number of threads to be used, default is 1. Useful when copying directories containing more than 1 file.
 * `-q <thread pool queue size>` : Thread pool queue size to be used, default is 1024. It takes effect only when thread count greater than 1.
 
 Example:
 
 * `hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2`
+* `hadoop fs -cp -f -d /user/hadoop/file1 /user/hadoop/file2`
 * `hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir`
 * `hadoop fs -cp -t 5 /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir`
 * `hadoop fs -cp -t 10 -q 2048 /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir`
@@ -403,7 +405,7 @@ Returns 0 on success and non-zero on error.
 getmerge
 --------
 
-Usage: `hadoop fs -getmerge [-nl] <src> <localdst>`
+Usage: `hadoop fs -getmerge [-nl] [-skip-empty-file] <src> <localdst>`
 
 Takes a source directory and a destination file as input and concatenates files in src into the destination local file. Optionally -nl can be set to enable adding a newline character (LF) at the end of each file.
 -skip-empty-file can be used to avoid unwanted newline characters in case of empty files.
@@ -412,6 +414,7 @@ Examples:
 
 * `hadoop fs -getmerge -nl /src /opt/output.txt`
 * `hadoop fs -getmerge -nl /src/file1.txt /src/file2.txt /output.txt`
+* `hadoop fs -getmerge -nl -skip-empty-file /src/file1.txt /src/file2.txt /output.txt`
 
 Exit Code:
 
```
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java

Lines changed: 3 additions & 2 deletions

```diff
@@ -226,13 +226,14 @@ public ConnectionContext getConnection(UserGroupInformation ugi,
           this.pools.put(connectionId, pool);
           this.connectionPoolToNamespaceMap.put(connectionId, nsId);
         }
-        long clientStateId = RouterStateIdContext.getClientStateIdFromCurrentCall(nsId);
-        pool.getPoolAlignmentContext().advanceClientStateId(clientStateId);
       } finally {
         writeLock.unlock();
       }
     }
 
+    long clientStateId = RouterStateIdContext.getClientStateIdFromCurrentCall(nsId);
+    pool.getPoolAlignmentContext().advanceClientStateId(clientStateId);
+
     ConnectionContext conn = pool.getConnection();
 
     // Add a new connection to the pool if it wasn't usable
```
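A note on the change above: advancing the client state id no longer happens under `writeLock`, which shortens the critical section on the pool map. This is safe because `PoolAlignmentContext` (next file) keeps the pool-local state id in a `java.util.concurrent.atomic.LongAccumulator`, which is thread-safe without external locking. A minimal standalone sketch of the pattern, assuming the accumulator combines values with `Long::max` so the id only moves forward (the accumulator function is not shown in this diff):

```java
import java.util.concurrent.atomic.LongAccumulator;

// Standalone sketch, not the actual PoolAlignmentContext class.
public class StateIdSketch {
  // Assumption: max-combining accumulator, so a stale id can never move it back.
  private final LongAccumulator poolLocalStateId =
      new LongAccumulator(Long::max, Long.MIN_VALUE);

  public void advanceClientStateId(long clientStateId) {
    poolLocalStateId.accumulate(clientStateId); // lock-free and thread-safe
  }

  public long getPoolLocalStateId() {
    return poolLocalStateId.get();
  }

  public static void main(String[] args) {
    StateIdSketch ctx = new StateIdSketch();
    ctx.advanceClientStateId(1L);
    ctx.advanceClientStateId(2L);
    ctx.advanceClientStateId(1L); // stale update is absorbed by max
    System.out.println(ctx.getPoolLocalStateId()); // prints 2
  }
}
```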

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PoolAlignmentContext.java

Lines changed: 7 additions & 0 deletions

```diff
@@ -20,6 +20,8 @@
 
 import java.io.IOException;
 import java.util.concurrent.atomic.LongAccumulator;
+
+import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.ipc.AlignmentContext;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos;
 
@@ -99,4 +101,9 @@ public boolean isCoordinatedCall(String protocolName, String method) {
   public void advanceClientStateId(Long clientStateId) {
     poolLocalStateId.accumulate(clientStateId);
   }
+
+  @VisibleForTesting
+  public long getPoolLocalStateId() {
+    return this.poolLocalStateId.get();
+  }
 }
```

hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java

Lines changed: 49 additions & 0 deletions

```diff
@@ -18,8 +18,11 @@
 package org.apache.hadoop.hdfs.server.federation.router;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RouterFederatedStateProto;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -31,6 +34,7 @@
 import org.junit.rules.ExpectedException;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
@@ -305,6 +309,51 @@ private void checkPoolConnections(UserGroupInformation ugi,
     }
   }
 
+  @Test
+  public void testAdvanceClientStateId() throws IOException {
+    // Start one ConnectionManager
+    Configuration tmpConf = new Configuration();
+    ConnectionManager tmpConnManager = new ConnectionManager(tmpConf);
+    tmpConnManager.start();
+    Map<ConnectionPoolId, ConnectionPool> poolMap = tmpConnManager.getPools();
+
+    // Mock one Server.Call with FederatedNamespaceState that ns0 = 1L.
+    Server.Call mockCall1 = new Server.Call(1, 1, null, null,
+        RPC.RpcKind.RPC_BUILTIN, new byte[] {1, 2, 3});
+    Map<String, Long> nsStateId = new HashMap<>();
+    nsStateId.put("ns0", 1L);
+    RouterFederatedStateProto.Builder stateBuilder = RouterFederatedStateProto.newBuilder();
+    nsStateId.forEach(stateBuilder::putNamespaceStateIds);
+    mockCall1.setFederatedNamespaceState(stateBuilder.build().toByteString());
+
+    Server.getCurCall().set(mockCall1);
+
+    // Create one new connection pool
+    tmpConnManager.getConnection(TEST_USER1, TEST_NN_ADDRESS, NamenodeProtocol.class, "ns0");
+    assertEquals(1, poolMap.size());
+    ConnectionPoolId connectionPoolId = new ConnectionPoolId(TEST_USER1,
+        TEST_NN_ADDRESS, NamenodeProtocol.class);
+    ConnectionPool pool = poolMap.get(connectionPoolId);
+    assertEquals(1L, pool.getPoolAlignmentContext().getPoolLocalStateId());
+
+    // Mock one Server.Call with FederatedNamespaceState that ns0 = 2L.
+    Server.Call mockCall2 = new Server.Call(2, 1, null, null,
+        RPC.RpcKind.RPC_BUILTIN, new byte[] {1, 2, 3});
+    nsStateId.clear();
+    nsStateId.put("ns0", 2L);
+    stateBuilder = RouterFederatedStateProto.newBuilder();
+    nsStateId.forEach(stateBuilder::putNamespaceStateIds);
+    mockCall2.setFederatedNamespaceState(stateBuilder.build().toByteString());
+
+    Server.getCurCall().set(mockCall2);
+
+    // Get one existed connection for ns0
+    tmpConnManager.getConnection(TEST_USER1, TEST_NN_ADDRESS, NamenodeProtocol.class, "ns0");
+    assertEquals(1, poolMap.size());
+    pool = poolMap.get(connectionPoolId);
+    assertEquals(2L, pool.getPoolAlignmentContext().getPoolLocalStateId());
+  }
+
   @Test
   public void testConfigureConnectionActiveRatio() throws IOException {
     // test 1 conn below the threshold and these conns are closed
```

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java

Lines changed: 9 additions & 0 deletions

```diff
@@ -114,6 +114,15 @@ protected void processConf() {
       numBlocksPerCheck =
           DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT;
     }
+
+    final String deprecatedKey = "dfs.namenode.decommission.nodes.per.interval";
+    final String strNodes = conf.get(deprecatedKey);
+    if (strNodes != null) {
+      LOG.warn("Deprecated configuration key {} will be ignored.", deprecatedKey);
+      LOG.warn("Please update your configuration to use {} instead.",
+          DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
+    }
+
     LOG.info("Initialized the Default Decommission and Maintenance monitor");
   }
```
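If it helps to see when the relocated warning fires: the check is a plain `conf.get` on the deprecated key, so any explicit setting triggers it regardless of value. A hypothetical driver follows; the replacement key string is assumed to be `dfs.namenode.decommission.blocks.per.interval` (the usual value of `DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY`), and none of this is part of the commit:

```java
import org.apache.hadoop.conf.Configuration;

// Hypothetical demo of the deprecated-key detection above.
public class DeprecatedKeyDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("dfs.namenode.decommission.nodes.per.interval", "5");
    // processConf() only checks the key's presence; the value is ignored, so
    // both LOG.warn lines would fire and the blocks-per-interval setting
    // (assumed: dfs.namenode.decommission.blocks.per.interval) is used instead.
    if (conf.get("dfs.namenode.decommission.nodes.per.interval") != null) {
      System.out.println("deprecated key set; warnings would be logged");
    }
  }
}
```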

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java

Lines changed: 1 addition & 32 deletions

```diff
@@ -108,32 +108,6 @@ void activate(Configuration conf) {
     Preconditions.checkArgument(intervalSecs >= 0, "Cannot set a negative " +
         "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY);
 
-    int blocksPerInterval = conf.getInt(
-        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
-        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT);
-
-    final String deprecatedKey =
-        "dfs.namenode.decommission.nodes.per.interval";
-    final String strNodes = conf.get(deprecatedKey);
-    if (strNodes != null) {
-      LOG.warn("Deprecated configuration key {} will be ignored.",
-          deprecatedKey);
-      LOG.warn("Please update your configuration to use {} instead.",
-          DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
-    }
-
-    Preconditions.checkArgument(blocksPerInterval > 0,
-        "Must set a positive value for "
-        + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
-
-    final int maxConcurrentTrackedNodes = conf.getInt(
-        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
-        DFSConfigKeys
-            .DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT);
-    Preconditions.checkArgument(maxConcurrentTrackedNodes >= 0,
-        "Cannot set a negative value for "
-        + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES);
-
     Class cls = null;
     try {
       cls = conf.getClass(
@@ -152,12 +126,7 @@ void activate(Configuration conf) {
     executor.scheduleWithFixedDelay(monitor, intervalSecs, intervalSecs,
         TimeUnit.SECONDS);
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Activating DatanodeAdminManager with interval {} seconds, " +
-          "{} max blocks per interval, " +
-          "{} max concurrently tracked nodes.", intervalSecs,
-          blocksPerInterval, maxConcurrentTrackedNodes);
-    }
+    LOG.debug("Activating DatanodeAdminManager with interval {} seconds.", intervalSecs);
   }
 
   /**
```

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminMonitorBase.java

Lines changed: 4 additions & 0 deletions

```diff
@@ -123,6 +123,10 @@ public void setConf(Configuration conf) {
           DFSConfigKeys
               .DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT;
     }
+
+    LOG.debug("Activating DatanodeAdminMonitor with {} max concurrently tracked nodes.",
+        maxConcurrentTrackedNodes);
+
     processConf();
   }
```

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java

Lines changed: 5 additions & 3 deletions

```diff
@@ -1138,9 +1138,11 @@ public ForkJoinPool getAddReplicaThreadPool() {
   }
 
   @VisibleForTesting
-  public static void reInitializeAddReplicaThreadPool() {
-    addReplicaThreadPool.shutdown();
-    addReplicaThreadPool = null;
+  public synchronized static void reInitializeAddReplicaThreadPool() {
+    if (addReplicaThreadPool != null) {
+      addReplicaThreadPool.shutdown();
+      addReplicaThreadPool = null;
+    }
   }
 
   @VisibleForTesting
```
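The BlockPoolSlice fix above guards a shared static pool against repeated or concurrent shutdown. A self-contained sketch of the same pattern with a stand-in holder class (not the real BlockPoolSlice API):

```java
import java.util.concurrent.ForkJoinPool;

// Stand-in holder, sketching the synchronized, null-guarded re-initialization.
public class PoolHolderSketch {
  private static ForkJoinPool addReplicaThreadPool = new ForkJoinPool(4);

  // synchronized + null check: a second (or concurrent) caller becomes a
  // no-op instead of hitting a NullPointerException on shutdown().
  public static synchronized void reInitializeAddReplicaThreadPool() {
    if (addReplicaThreadPool != null) {
      addReplicaThreadPool.shutdown();
      addReplicaThreadPool = null;
    }
  }

  public static void main(String[] args) {
    reInitializeAddReplicaThreadPool();
    reInitializeAddReplicaThreadPool(); // safe no-op now
  }
}
```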

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Lines changed: 9 additions & 3 deletions

```diff
@@ -9034,9 +9034,15 @@ private boolean isObserver() {
 
   private void checkBlockLocationsWhenObserver(LocatedBlocks blocks, String src)
       throws ObserverRetryOnActiveException {
-    for (LocatedBlock b : blocks.getLocatedBlocks()) {
-      if (b.getLocations() == null || b.getLocations().length == 0) {
-        throw new ObserverRetryOnActiveException("Zero blocklocations for " + src);
+    if (blocks == null) {
+      return;
+    }
+    List<LocatedBlock> locatedBlockList = blocks.getLocatedBlocks();
+    if (locatedBlockList != null) {
+      for (LocatedBlock b : locatedBlockList) {
+        if (b.getLocations() == null || b.getLocations().length == 0) {
+          throw new ObserverRetryOnActiveException("Zero blocklocations for " + src);
+        }
       }
     }
   }
```
