Skip to content

Commit 653264f

Browse files
authored
Merge branch 'apache:trunk' into YARN-11332
2 parents 3791f09 + 136291d commit 653264f

File tree

74 files changed

+3086
-337
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

74 files changed

+3086
-337
lines changed

LICENSE-binary

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ com.cedarsoftware:java-util:1.9.0
220220
com.cedarsoftware:json-io:2.5.1
221221
com.fasterxml.jackson.core:jackson-annotations:2.12.7
222222
com.fasterxml.jackson.core:jackson-core:2.12.7
223-
com.fasterxml.jackson.core:jackson-databind:2.12.7
223+
com.fasterxml.jackson.core:jackson-databind:2.12.7.1
224224
com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.12.7
225225
com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.12.7
226226
com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.12.7
@@ -306,7 +306,7 @@ org.apache.avro:avro:1.9.2
306306
org.apache.commons:commons-collections4:4.2
307307
org.apache.commons:commons-compress:1.21
308308
org.apache.commons:commons-configuration2:2.8.0
309-
org.apache.commons:commons-csv:1.0
309+
org.apache.commons:commons-csv:1.9.0
310310
org.apache.commons:commons-digester:1.8.1
311311
org.apache.commons:commons-lang3:3.12.0
312312
org.apache.commons:commons-math3:3.6.1

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -44,12 +44,7 @@ public class InstrumentedReadLock extends InstrumentedLock {
4444
* there can be multiple threads that hold the read lock concurrently.
4545
*/
4646
private final ThreadLocal<Long> readLockHeldTimeStamp =
47-
new ThreadLocal<Long>() {
48-
@Override
49-
protected Long initialValue() {
50-
return Long.MAX_VALUE;
51-
};
52-
};
47+
ThreadLocal.withInitial(() -> Long.MAX_VALUE);
5348

5449
public InstrumentedReadLock(String name, Logger logger,
5550
ReentrantReadWriteLock readWriteLock,

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,9 @@
3737
@InterfaceStability.Unstable
3838
public class InstrumentedWriteLock extends InstrumentedLock {
3939

40+
private final ReentrantReadWriteLock readWriteLock;
41+
private volatile long writeLockHeldTimeStamp = 0;
42+
4043
public InstrumentedWriteLock(String name, Logger logger,
4144
ReentrantReadWriteLock readWriteLock,
4245
long minLoggingGapMs, long lockWarningThresholdMs) {
@@ -50,5 +53,28 @@ public InstrumentedWriteLock(String name, Logger logger,
5053
long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
5154
super(name, logger, readWriteLock.writeLock(), minLoggingGapMs,
5255
lockWarningThresholdMs, clock);
56+
this.readWriteLock = readWriteLock;
57+
}
58+
59+
@Override
60+
public void unlock() {
61+
boolean needReport = readWriteLock.getWriteHoldCount() == 1;
62+
long localWriteReleaseTime = getTimer().monotonicNow();
63+
long localWriteAcquireTime = writeLockHeldTimeStamp;
64+
getLock().unlock();
65+
if (needReport) {
66+
writeLockHeldTimeStamp = 0;
67+
check(localWriteAcquireTime, localWriteReleaseTime, true);
68+
}
69+
}
70+
71+
/**
72+
* Starts timing for the instrumented write lock.
73+
*/
74+
@Override
75+
protected void startLockTiming() {
76+
if (readWriteLock.getWriteHoldCount() == 1) {
77+
writeLockHeldTimeStamp = getTimer().monotonicNow();
78+
}
5379
}
5480
}

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/XMLUtils.java

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -41,17 +41,18 @@
4141
@InterfaceStability.Unstable
4242
public class XMLUtils {
4343

44-
private static final String DISALLOW_DOCTYPE_DECL =
44+
public static final String DISALLOW_DOCTYPE_DECL =
4545
"http://apache.org/xml/features/disallow-doctype-decl";
46-
private static final String LOAD_EXTERNAL_DECL =
46+
public static final String LOAD_EXTERNAL_DECL =
4747
"http://apache.org/xml/features/nonvalidating/load-external-dtd";
48-
private static final String EXTERNAL_GENERAL_ENTITIES =
48+
public static final String EXTERNAL_GENERAL_ENTITIES =
4949
"http://xml.org/sax/features/external-general-entities";
50-
private static final String EXTERNAL_PARAMETER_ENTITIES =
50+
public static final String EXTERNAL_PARAMETER_ENTITIES =
5151
"http://xml.org/sax/features/external-parameter-entities";
52-
private static final String CREATE_ENTITY_REF_NODES =
52+
public static final String CREATE_ENTITY_REF_NODES =
5353
"http://apache.org/xml/features/dom/create-entity-ref-nodes";
54-
54+
public static final String VALIDATION =
55+
"http://xml.org/sax/features/validation";
5556

5657
/**
5758
* Transform input xml given a stylesheet.

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java

Lines changed: 107 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -233,4 +233,111 @@ protected void logWarning(long lockHeldTime, SuppressedSnapshot stats) {
233233
assertEquals(2, wlogged.get());
234234
assertEquals(1, wsuppresed.get());
235235
}
236+
237+
238+
/**
239+
* Tests the warning when the write lock is held longer than threshold.
240+
*/
241+
@Test(timeout=10000)
242+
public void testWriteLockLongHoldingReportWithReentrant() {
243+
String testname = name.getMethodName();
244+
final AtomicLong time = new AtomicLong(0);
245+
Timer mclock = new Timer() {
246+
@Override
247+
public long monotonicNow() {
248+
return time.get();
249+
}
250+
};
251+
252+
final AtomicLong wlogged = new AtomicLong(0);
253+
final AtomicLong wsuppresed = new AtomicLong(0);
254+
final AtomicLong totalHeldTime = new AtomicLong(0);
255+
ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(true);
256+
InstrumentedWriteLock writeLock = new InstrumentedWriteLock(testname, LOG,
257+
readWriteLock, 2000, 300, mclock) {
258+
@Override
259+
protected void logWarning(long lockHeldTime, SuppressedSnapshot stats) {
260+
totalHeldTime.addAndGet(lockHeldTime);
261+
wlogged.incrementAndGet();
262+
wsuppresed.set(stats.getSuppressedCount());
263+
}
264+
};
265+
266+
InstrumentedReadLock readLock = new InstrumentedReadLock(testname, LOG,
267+
readWriteLock, 2000, 300, mclock) {
268+
@Override
269+
protected void logWarning(long lockHeldTime, SuppressedSnapshot stats) {
270+
totalHeldTime.addAndGet(lockHeldTime);
271+
wlogged.incrementAndGet();
272+
wsuppresed.set(stats.getSuppressedCount());
273+
}
274+
};
275+
276+
writeLock.lock(); // t = 0
277+
time.set(100);
278+
279+
writeLock.lock(); // t = 100
280+
time.set(500);
281+
282+
writeLock.lock(); // t = 500
283+
time.set(2900);
284+
writeLock.unlock(); // t = 2900
285+
286+
readLock.lock(); // t = 2900
287+
time.set(3000);
288+
readLock.unlock(); // t = 3000
289+
290+
writeLock.unlock(); // t = 3000
291+
292+
writeLock.unlock(); // t = 3000
293+
assertEquals(1, wlogged.get());
294+
assertEquals(0, wsuppresed.get());
295+
assertEquals(3000, totalHeldTime.get());
296+
}
297+
298+
/**
299+
* Tests the warning when the read lock is held longer than threshold.
300+
*/
301+
@Test(timeout=10000)
302+
public void testReadLockLongHoldingReportWithReentrant() {
303+
String testname = name.getMethodName();
304+
final AtomicLong time = new AtomicLong(0);
305+
Timer mclock = new Timer() {
306+
@Override
307+
public long monotonicNow() {
308+
return time.get();
309+
}
310+
};
311+
312+
final AtomicLong wlogged = new AtomicLong(0);
313+
final AtomicLong wsuppresed = new AtomicLong(0);
314+
final AtomicLong totalHelpTime = new AtomicLong(0);
315+
ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(true);
316+
InstrumentedReadLock readLock = new InstrumentedReadLock(testname, LOG,
317+
readWriteLock, 2000, 300, mclock) {
318+
@Override
319+
protected void logWarning(long lockHeldTime, SuppressedSnapshot stats) {
320+
totalHelpTime.addAndGet(lockHeldTime);
321+
wlogged.incrementAndGet();
322+
wsuppresed.set(stats.getSuppressedCount());
323+
}
324+
};
325+
326+
readLock.lock(); // t = 0
327+
time.set(100);
328+
329+
readLock.lock(); // t = 100
330+
time.set(500);
331+
332+
readLock.lock(); // t = 500
333+
time.set(3000);
334+
readLock.unlock(); // t = 3000
335+
336+
readLock.unlock(); // t = 3000
337+
338+
readLock.unlock(); // t = 3000
339+
assertEquals(1, wlogged.get());
340+
assertEquals(0, wsuppresed.get());
341+
assertEquals(3000, totalHelpTime.get());
342+
}
236343
}

hadoop-project/pom.xml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@
7070

7171
<!-- jackson versions -->
7272
<jackson2.version>2.12.7</jackson2.version>
73-
<jackson2.databind.version>2.12.7</jackson2.databind.version>
73+
<jackson2.databind.version>2.12.7.1</jackson2.databind.version>
7474

7575
<!-- httpcomponents versions -->
7676
<httpclient.version>4.5.13</httpclient.version>
@@ -117,7 +117,7 @@
117117
<commons-codec.version>1.15</commons-codec.version>
118118
<commons-collections.version>3.2.2</commons-collections.version>
119119
<commons-compress.version>1.21</commons-compress.version>
120-
<commons-csv.version>1.0</commons-csv.version>
120+
<commons-csv.version>1.9.0</commons-csv.version>
121121
<commons-io.version>2.11.0</commons-io.version>
122122
<commons-lang3.version>3.12.0</commons-lang3.version>
123123
<commons-logging.version>1.1.3</commons-logging.version>

hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -640,7 +640,10 @@ public static AWSCredentialProviderList buildAWSProviderList(
640640
AWSCredentialProviderList providers = new AWSCredentialProviderList();
641641
for (Class<?> aClass : awsClasses) {
642642

643-
if (aClass.getName().contains(AWS_AUTH_CLASS_PREFIX)) {
643+
// List of V1 credential providers that will be migrated with V2 upgrade
644+
if (!Arrays.asList("EnvironmentVariableCredentialsProvider",
645+
"EC2ContainerCredentialsProviderWrapper", "InstanceProfileCredentialsProvider")
646+
.contains(aClass.getSimpleName()) && aClass.getName().contains(AWS_AUTH_CLASS_PREFIX)) {
644647
V2Migration.v1ProviderReferenced(aClass.getName());
645648
}
646649

hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3_select.md

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -934,6 +934,21 @@ Caused by: com.amazonaws.services.s3.model.AmazonS3Exception: GZIP is not applic
934934
...
935935
```
936936
937+
938+
### AWSBadRequestException `UnsupportedStorageClass`
939+
940+
S3 Select doesn't work with some storage classes like Glacier or Reduced Redundancy.
941+
Make sure you've set `fs.s3a.create.storage.class` to a supported storage class for S3 Select.
942+
943+
```
944+
org.apache.hadoop.fs.s3a.AWSBadRequestException:
945+
Select on s3a://example/dataset.csv.gz:
946+
com.amazonaws.services.s3.model.AmazonS3Exception:
947+
We do not support REDUCED_REDUNDANCY storage class.
948+
Please check the service documentation and try again.
949+
(Service: Amazon S3; Status Code: 400; Error Code: UnsupportedStorageClass
950+
```
951+
937952
### `PathIOException`: "seek() not supported"
938953
939954
The input stream returned by the select call does not support seeking

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/AbstractS3SelectTest.java

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,9 @@
6060
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
6161
import org.apache.hadoop.util.DurationInfo;
6262

63+
import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS;
6364
import static org.apache.hadoop.fs.s3a.S3ATestUtils.getLandsatCSVPath;
65+
import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
6466
import static org.apache.hadoop.fs.s3a.select.CsvFile.ALL_QUOTES;
6567
import static org.apache.hadoop.fs.s3a.select.SelectConstants.*;
6668
import static org.apache.hadoop.test.LambdaTestUtils.intercept;
@@ -280,6 +282,14 @@ boolean isSelectAvailable(final FileSystem filesystem) {
280282
.hasCapability(S3_SELECT_CAPABILITY);
281283
}
282284

285+
@Override
286+
protected Configuration createConfiguration() {
287+
Configuration conf = super.createConfiguration();
288+
removeBaseAndBucketOverrides(conf, STORAGE_CLASS);
289+
290+
return conf;
291+
}
292+
283293
/**
284294
* Setup: requires select to be available.
285295
*/

hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,8 @@
3636
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
3737
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
3838
import org.apache.hadoop.yarn.sls.SLSRunner;
39+
import org.slf4j.Logger;
40+
import org.slf4j.LoggerFactory;
3941

4042
@Private
4143
@Unstable
@@ -45,6 +47,7 @@ public class SLSCapacityScheduler extends CapacityScheduler implements
4547
private final SLSSchedulerCommons schedulerCommons;
4648
private Configuration conf;
4749
private SLSRunner runner;
50+
private static final Logger LOG = LoggerFactory.getLogger(SLSCapacityScheduler.class);
4851

4952
public SLSCapacityScheduler() {
5053
schedulerCommons = new SLSSchedulerCommons(this);
@@ -105,7 +108,12 @@ public boolean tryCommit(Resource cluster, ResourceCommitRequest r,
105108

106109
@Override
107110
public void handle(SchedulerEvent schedulerEvent) {
108-
schedulerCommons.handle(schedulerEvent);
111+
try {
112+
schedulerCommons.handle(schedulerEvent);
113+
} catch(Exception e) {
114+
LOG.error("Caught exception while handling scheduler event", e);
115+
throw e;
116+
}
109117
}
110118

111119
@Override

0 commit comments

Comments
 (0)