
Commit d0b858c

Merge branch 'apache:trunk' into YARN-11247
2 parents 0e74c16 + 07581f1

139 files changed: +4412 additions, -734 deletions


LICENSE-binary

Lines changed: 4 additions & 4 deletions
@@ -305,12 +305,12 @@ net.minidev:json-smart:2.4.7
 org.apache.avro:avro:1.9.2
 org.apache.commons:commons-collections4:4.2
 org.apache.commons:commons-compress:1.21
-org.apache.commons:commons-configuration2:2.1.1
+org.apache.commons:commons-configuration2:2.8.0
 org.apache.commons:commons-csv:1.0
 org.apache.commons:commons-digester:1.8.1
 org.apache.commons:commons-lang3:3.12.0
 org.apache.commons:commons-math3:3.6.1
-org.apache.commons:commons-text:1.4
+org.apache.commons:commons-text:1.9
 org.apache.commons:commons-validator:1.6
 org.apache.curator:curator-client:5.2.0
 org.apache.curator:curator-framework:5.2.0
@@ -362,7 +362,7 @@ org.ehcache:ehcache:3.3.1
 org.lz4:lz4-java:1.7.1
 org.objenesis:objenesis:2.6
 org.xerial.snappy:snappy-java:1.0.5
-org.yaml:snakeyaml:1.31
+org.yaml:snakeyaml:1.32
 org.wildfly.openssl:wildfly-openssl:1.0.7.Final


@@ -523,7 +523,7 @@ junit:junit:4.13.2
 HSQL License
 ------------

-org.hsqldb:hsqldb:2.3.4
+org.hsqldb:hsqldb:2.5.2


 JDOM License

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java

Lines changed: 1 addition & 1 deletion
@@ -98,7 +98,7 @@ static void writeResponse(Configuration conf,
     if (FORMAT_JSON.equals(format)) {
       Configuration.dumpConfiguration(conf, propertyName, out);
     } else if (FORMAT_XML.equals(format)) {
-      conf.writeXml(propertyName, out);
+      conf.writeXml(propertyName, out, conf);
     } else {
       throw new BadFormatException("Bad format: " + format);
     }

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigRedactor.java

Lines changed: 16 additions & 0 deletions
@@ -37,6 +37,7 @@
 public class ConfigRedactor {

   private static final String REDACTED_TEXT = "<redacted>";
+  private static final String REDACTED_XML = "******";

   private List<Pattern> compiledPatterns;

@@ -84,4 +85,19 @@ private boolean configIsSensitive(String key) {
     }
     return false;
   }
+
+  /**
+   * Given a key / value pair, decides whether or not to redact and returns
+   * either the original value or text indicating it has been redacted.
+   *
+   * @param key param key.
+   * @param value param value, will return if conditions permit.
+   * @return Original value, or text indicating it has been redacted
+   */
+  public String redactXml(String key, String value) {
+    if (configIsSensitive(key)) {
+      return REDACTED_XML;
+    }
+    return value;
+  }
 }
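
A minimal sketch of the new helper, assuming the default sensitive-key patterns ("password$" is one of them); the key names are illustrative:

    Configuration conf = new Configuration();
    ConfigRedactor redactor = new ConfigRedactor(conf);
    // Key matches a sensitive pattern: masked with the XML-specific token.
    redactor.redactXml("ssl.server.keystore.password", "hunter2"); // -> "******"
    // Non-sensitive key: value passes through unchanged.
    redactor.redactXml("dfs.replication", "3");                    // -> "3"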

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

Lines changed: 19 additions & 9 deletions
@@ -3593,11 +3593,13 @@ public void writeXml(Writer out) throws IOException {
    * </ul>
    * @param propertyName xml property name.
    * @param out the writer to write to.
+   * @param config configuration.
    * @throws IOException raised on errors performing I/O.
    */
-  public void writeXml(@Nullable String propertyName, Writer out)
+  public void writeXml(@Nullable String propertyName, Writer out, Configuration config)
       throws IOException, IllegalArgumentException {
-    Document doc = asXmlDocument(propertyName);
+    ConfigRedactor redactor = config != null ? new ConfigRedactor(this) : null;
+    Document doc = asXmlDocument(propertyName, redactor);

     try {
       DOMSource source = new DOMSource(doc);
@@ -3614,11 +3616,16 @@ public void writeXml(@Nullable String propertyName, Writer out)
     }
   }

+  public void writeXml(@Nullable String propertyName, Writer out)
+      throws IOException, IllegalArgumentException {
+    writeXml(propertyName, out, null);
+  }
+
   /**
    * Return the XML DOM corresponding to this Configuration.
    */
-  private synchronized Document asXmlDocument(@Nullable String propertyName)
-      throws IOException, IllegalArgumentException {
+  private synchronized Document asXmlDocument(@Nullable String propertyName,
+      ConfigRedactor redactor) throws IOException, IllegalArgumentException {
     Document doc;
     try {
       doc = DocumentBuilderFactory
@@ -3641,13 +3648,13 @@ private synchronized Document asXmlDocument(@Nullable String propertyName)
           propertyName + " not found");
       } else {
         // given property is found, write single property
-        appendXMLProperty(doc, conf, propertyName);
+        appendXMLProperty(doc, conf, propertyName, redactor);
         conf.appendChild(doc.createTextNode("\n"));
       }
     } else {
       // append all elements
       for (Enumeration<Object> e = properties.keys(); e.hasMoreElements();) {
-        appendXMLProperty(doc, conf, (String)e.nextElement());
+        appendXMLProperty(doc, conf, (String)e.nextElement(), redactor);
         conf.appendChild(doc.createTextNode("\n"));
       }
     }
@@ -3663,7 +3670,7 @@ private synchronized Document asXmlDocument(@Nullable String propertyName)
    * @param propertyName
    */
   private synchronized void appendXMLProperty(Document doc, Element conf,
-      String propertyName) {
+      String propertyName, ConfigRedactor redactor) {
     // skip writing if given property name is empty or null
     if (!Strings.isNullOrEmpty(propertyName)) {
       String value = properties.getProperty(propertyName);
@@ -3676,8 +3683,11 @@ private synchronized void appendXMLProperty(Document doc, Element conf,
       propNode.appendChild(nameNode);

       Element valueNode = doc.createElement("value");
-      valueNode.appendChild(doc.createTextNode(
-          properties.getProperty(propertyName)));
+      String propertyValue = properties.getProperty(propertyName);
+      if (redactor != null) {
+        propertyValue = redactor.redactXml(propertyName, propertyValue);
+      }
+      valueNode.appendChild(doc.createTextNode(propertyValue));
       propNode.appendChild(valueNode);

       Element finalNode = doc.createElement("final");
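
A usage sketch of the new three-argument overload (StringWriter chosen for illustration; default sensitive-key patterns assumed):

    Configuration conf = new Configuration();
    conf.set("ssl.server.keystore.password", "hunter2");

    StringWriter out = new StringWriter();
    // A non-null Configuration turns redaction on; the old two-argument
    // overload delegates here with null, so its output is unchanged.
    conf.writeXml(null, out, conf);
    // The <value> element for the keystore password is emitted as "******".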

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

Lines changed: 1 addition & 0 deletions
@@ -1000,6 +1000,7 @@ public class CommonConfigurationKeysPublic {
       String.join(",",
           "secret$",
           "password$",
+          "username$",
           "ssl.keystore.pass$",
           "fs.s3.*[Ss]ecret.?[Kk]ey",
           "fs.s3a.*.server-side-encryption.key",

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java

Lines changed: 22 additions & 4 deletions
@@ -396,6 +396,10 @@ public Path getLocalPathForWrite(String pathStr, long size,
     Context ctx = confChanged(conf);
     int numDirs = ctx.localDirs.length;
     int numDirsSearched = 0;
+    // Max capacity in any directory
+    long maxCapacity = 0;
+    String errorText = null;
+    IOException diskException = null;
     //remove the leading slash from the path (to make sure that the uri
     //resolution results in a valid path on the dir being checked)
     if (pathStr.startsWith("/")) {
@@ -444,9 +448,18 @@ public Path getLocalPathForWrite(String pathStr, long size,
       int dirNum = ctx.getAndIncrDirNumLastAccessed(randomInc);
       while (numDirsSearched < numDirs) {
         long capacity = ctx.dirDF[dirNum].getAvailable();
+        if (capacity > maxCapacity) {
+          maxCapacity = capacity;
+        }
         if (capacity > size) {
-          returnPath =
-              createPath(ctx.localDirs[dirNum], pathStr, checkWrite);
+          try {
+            returnPath = createPath(ctx.localDirs[dirNum], pathStr,
+                checkWrite);
+          } catch (IOException e) {
+            errorText = e.getMessage();
+            diskException = e;
+            LOG.debug("DiskException caught for dir {}", ctx.localDirs[dirNum], e);
+          }
           if (returnPath != null) {
             ctx.getAndIncrDirNumLastAccessed(numDirsSearched);
             break;
@@ -462,8 +475,13 @@ public Path getLocalPathForWrite(String pathStr, long size,
     }

     //no path found
-    throw new DiskErrorException("Could not find any valid local " +
-        "directory for " + pathStr);
+    String newErrorText = "Could not find any valid local directory for " +
+        pathStr + " with requested size " + size +
+        " as the max capacity in any directory is " + maxCapacity;
+    if (errorText != null) {
+      newErrorText = newErrorText + " due to " + errorText;
+    }
+    throw new DiskErrorException(newErrorText, diskException);
   }

   /** Creates a file on the local FS. Pass size as
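
What callers now see when no directory can satisfy a request — a sketch with a hypothetical config key:

    Configuration conf = new Configuration();
    conf.set("app.local.dirs", "/data/1,/data/2"); // hypothetical key
    LocalDirAllocator allocator = new LocalDirAllocator("app.local.dirs");
    try {
      allocator.getLocalPathForWrite("scratch/part-0", 1L << 40, conf);
    } catch (DiskErrorException e) {
      // The message now reports the requested size and the largest free
      // capacity seen across all directories, and any IOException hit while
      // creating a path is attached as the cause instead of being swallowed.
    }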

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java

Lines changed: 1 addition & 1 deletion
@@ -84,7 +84,7 @@ public interface StreamCapabilities {
    * Support for vectored IO api.
    * See {@code PositionedReadable#readVectored(List, IntFunction)}.
    */
-  String VECTOREDIO = "readvectored";
+  String VECTOREDIO = "in:readvectored";

   /**
    * Stream abort() capability implemented by {@link Abortable#abort()}.
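
Callers that probe through the constant rather than a hard-coded string absorb this rename automatically; a brief sketch:

    FSDataInputStream in = fs.open(path);
    if (in.hasCapability(StreamCapabilities.VECTOREDIO)) {
      // the stream supports PositionedReadable#readVectored
    }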

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java

Lines changed: 23 additions & 0 deletions
@@ -23,8 +23,10 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.fs.viewfs.Constants.*;

 /**
  * Provides a trash facility which supports pluggable Trash policies.
@@ -94,6 +96,27 @@ public static boolean moveToAppropriateTrash(FileSystem fs, Path p,
       LOG.warn("Failed to get server trash configuration", e);
       throw new IOException("Failed to get server trash configuration", e);
     }
+
+    /*
+     * In HADOOP-18144, we changed getTrashRoot() in ViewFileSystem to return a
+     * viewFS path, instead of a targetFS path. moveToTrash works for
+     * ViewFileSystem now. ViewFileSystem will do path resolution internally by
+     * itself.
+     *
+     * When localized trash flag is enabled:
+     * 1). if fs is a ViewFileSystem, we can initialize Trash() with a
+     *     ViewFileSystem object;
+     * 2). When fs is not a ViewFileSystem, the only place we would need to
+     *     resolve a path is for symbolic links. However, symlink is not
+     *     enabled in Hadoop due to the complexity to support it
+     *     (HADOOP-10019).
+     */
+    if (conf.getBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT,
+        CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT)) {
+      Trash trash = new Trash(fs, conf);
+      return trash.moveToTrash(p);
+    }
+
     Trash trash = new Trash(fullyResolvedFs, conf);
     return trash.moveToTrash(fullyResolvedPath);
   }
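
A sketch of opting in from a ViewFileSystem client (the URI and path are illustrative):

    Configuration conf = new Configuration();
    conf.setBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT, true);
    FileSystem viewFs = FileSystem.get(URI.create("viewfs://cluster/"), conf);
    // With the flag on, the trash root stays inside the mount point and the
    // ViewFileSystem resolves the path itself rather than going to targetFS:
    Trash.moveToAppropriateTrash(viewFs, new Path("/user/alice/tmp/f"), conf);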

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/WeakReferenceThreadMap.java

Lines changed: 33 additions & 6 deletions
@@ -25,6 +25,8 @@

 import org.apache.hadoop.util.WeakReferenceMap;

+import static java.util.Objects.requireNonNull;
+
 /**
  * A WeakReferenceMap for threads.
  * @param <V> value type of the map
@@ -36,30 +38,55 @@ public WeakReferenceThreadMap(final Function<? super Long, ? extends V> factory,
     super(factory, referenceLost);
   }

+  /**
+   * Get the value for the current thread, creating if needed.
+   * @return an instance.
+   */
   public V getForCurrentThread() {
     return get(currentThreadId());
   }

+  /**
+   * Remove the reference for the current thread.
+   * @return any reference value which existed.
+   */
   public V removeForCurrentThread() {
     return remove(currentThreadId());
   }

+  /**
+   * Get the current thread ID.
+   * @return thread ID.
+   */
   public long currentThreadId() {
     return Thread.currentThread().getId();
   }

+  /**
+   * Set the new value for the current thread.
+   * @param newVal new reference to set for the active thread.
+   * @return the previously set value, possibly null
+   */
   public V setForCurrentThread(V newVal) {
+    requireNonNull(newVal);
     long id = currentThreadId();

     // if the same object is already in the map, just return it.
-    WeakReference<V> ref = lookup(id);
-    // Reference value could be set to null. Thus, ref.get() could return
-    // null. Should be handled accordingly while using the returned value.
-    if (ref != null && ref.get() == newVal) {
-      return ref.get();
+    WeakReference<V> existingWeakRef = lookup(id);
+
+    // The looked up reference could be one of
+    // 1. null: nothing there
+    // 2. valid but get() == null : reference lost by GC.
+    // 3. different from the new value
+    // 4. the same as the old value
+    if (resolve(existingWeakRef) == newVal) {
+      // case 4: do nothing, return the new value
+      return newVal;
+    } else {
+      // cases 1, 2, 3: update the map and return the old value
+      return put(id, newVal);
     }
-
-    return put(id, newVal);
   }

 }
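
How the tightened semantics look to a caller — a sketch in which the value type and the shape of the reference-lost callback are assumptions for illustration:

    WeakReferenceThreadMap<String> names = new WeakReferenceThreadMap<>(
        threadId -> "thread-" + threadId,  // factory for absent entries
        threadId -> { });                  // no-op when GC drops a reference
    String name = names.getForCurrentThread(); // created on demand
    names.setForCurrentThread(name);  // same instance: map untouched, returns name
    names.setForCurrentThread(null);  // now fails fast with NullPointerException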

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/ExecutorServiceFuturePool.java

Lines changed: 19 additions & 1 deletion
@@ -22,8 +22,13 @@
 import java.util.Locale;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;

+import org.slf4j.Logger;
+
+import org.apache.hadoop.util.concurrent.HadoopExecutors;
+
 /**
  * A FuturePool implementation backed by a java.util.concurrent.ExecutorService.
  *
@@ -37,7 +42,8 @@
  *
  */
 public class ExecutorServiceFuturePool {
-  private ExecutorService executor;
+
+  private final ExecutorService executor;

   public ExecutorServiceFuturePool(ExecutorService executor) {
     this.executor = executor;
@@ -64,6 +70,18 @@ public Future<Void> executeRunnable(final Runnable r) {
     return (Future<Void>) executor.submit(r::run);
   }

+  /**
+   * Utility to shutdown the {@link ExecutorService} used by this class. Will wait up to a
+   * certain timeout for the ExecutorService to gracefully shutdown.
+   *
+   * @param logger Logger
+   * @param timeout the maximum time to wait
+   * @param unit the time unit of the timeout argument
+   */
+  public void shutdown(Logger logger, long timeout, TimeUnit unit) {
+    HadoopExecutors.shutdown(executor, logger, timeout, unit);
+  }
+
   public String toString() {
     return String.format(Locale.ROOT, "ExecutorServiceFuturePool(executor=%s)", executor);
   }
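
A sketch of the intended lifecycle (LOG is assumed to be an slf4j Logger; the task body is illustrative):

    ExecutorServiceFuturePool futurePool =
        new ExecutorServiceFuturePool(Executors.newFixedThreadPool(4));
    Future<Void> task = futurePool.executeRunnable(
        () -> prefetchNextBlock()); // hypothetical work item
    // ... on close, drain gracefully instead of abandoning the executor:
    futurePool.shutdown(LOG, 30, TimeUnit.SECONDS);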
