
Commit d0cf014

Merge branch 'trunk' into YARN-11158-V3

2 parents c7742c6 + 7d39abd

File tree

20 files changed: +963 −164 lines


hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeFloat.java

Lines changed: 1 addition & 1 deletion

@@ -69,7 +69,7 @@ private final boolean compareAndSet(float expect, float update) {
 
   private void incr(float delta) {
     while (true) {
-      float current = value.get();
+      float current = Float.intBitsToFloat(value.get());
       float next = current + delta;
       if (compareAndSet(current, next)) {
        setChanged();
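
Note on the fix: MutableGaugeFloat keeps the gauge as raw IEEE-754 bits inside an AtomicInteger, so value.get() returns encoded bits; the old line treated those bits as the float value itself, corrupting every increment. A minimal standalone sketch of the pattern (class and field names are illustrative, not the actual Hadoop source):

import java.util.concurrent.atomic.AtomicInteger;

// Sketch of a float gauge backed by an AtomicInteger holding IEEE-754 bits.
class FloatGaugeSketch {
  private final AtomicInteger value =
      new AtomicInteger(Float.floatToIntBits(0.0f));

  void incr(float delta) {
    while (true) {
      // Decode the stored bits before doing float arithmetic -- this is
      // the step the patched line adds via Float.intBitsToFloat.
      float current = Float.intBitsToFloat(value.get());
      float next = current + delta;
      // CAS on the encoded bit patterns; retry if another thread won.
      if (value.compareAndSet(Float.floatToIntBits(current),
                              Float.floatToIntBits(next))) {
        return;
      }
    }
  }

  float value() {
    return Float.intBitsToFloat(value.get());
  }
}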

hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

Lines changed: 7 additions & 0 deletions

@@ -2166,6 +2166,13 @@ The switch to turn S3A auditing on or off.
   <description>The AbstractFileSystem for gs: uris.</description>
 </property>
 
+<property>
+  <name>fs.azure.enable.readahead</name>
+  <value>false</value>
+  <description>Disable readahead/prefetching in AbfsInputStream.
+  See HADOOP-18521</description>
+</property>
+
 <property>
   <name>io.seqfile.compress.blocksize</name>
   <value>1000000</value>
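
With the default now false, clients that want the old prefetching behavior must opt back in. A hedged sketch of doing so programmatically (the abfs:// URI is a placeholder, not from this commit):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class ReadaheadOptIn {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Re-enable ABFS readahead, overriding the new core-default.xml value.
    conf.setBoolean("fs.azure.enable.readahead", true);
    // Placeholder account; substitute a real abfs:// URI.
    FileSystem fs = FileSystem.get(
        URI.create("abfs://container@account.dfs.core.windows.net/"), conf);
    System.out.println(fs.getUri());
  }
}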

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java

Lines changed: 12 additions & 0 deletions

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.metrics2.lib;
 
+import static org.apache.hadoop.metrics2.impl.MsInfo.Context;
 import static org.apache.hadoop.metrics2.lib.Interns.info;
 import static org.apache.hadoop.test.MetricsAsserts.*;
 import static org.mockito.AdditionalMatchers.eq;

@@ -500,4 +501,15 @@ public void testMutableQuantilesEmptyRollover() throws Exception {
     verify(mb, times(2)).addGauge(
         info("FooNumOps", "Number of ops for stat with 5s interval"), (long) 0);
   }
+
+  /**
+   * Test {@link MutableGaugeFloat#incr()}.
+   */
+  @Test(timeout = 30000)
+  public void testMutableGaugeFloat() {
+    MutableGaugeFloat mgf = new MutableGaugeFloat(Context, 3.2f);
+    assertEquals(3.2f, mgf.value(), 0.0);
+    mgf.incr();
+    assertEquals(4.2f, mgf.value(), 0.0);
+  }
 }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

Lines changed: 1 addition & 0 deletions

@@ -912,6 +912,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       fsNamesys.getFSImage().updateStorageVersion();
       fsNamesys.getFSImage().renameCheckpoint(NameNodeFile.IMAGE_ROLLBACK,
           NameNodeFile.IMAGE);
+      fsNamesys.setNeedRollbackFsImage(false);
       break;
     }
     case OP_ADD_CACHE_DIRECTIVE: {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java

Lines changed: 36 additions & 0 deletions

@@ -33,6 +33,9 @@
 import javax.management.ReflectionException;
 import javax.management.openmbean.CompositeDataSupport;
 
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.junit.Rule;
 import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;

@@ -720,6 +723,39 @@ static void queryForPreparation(DistributedFileSystem dfs) throws IOException,
     }
   }
 
+  @Test
+  public void testEditLogTailerRollingUpgrade() throws IOException, InterruptedException {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
+
+    HAUtil.setAllowStandbyReads(conf, true);
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology())
+        .numDataNodes(0)
+        .build();
+    cluster.waitActive();
+
+    cluster.transitionToActive(0);
+
+    NameNode nn1 = cluster.getNameNode(0);
+    NameNode nn2 = cluster.getNameNode(1);
+    try {
+      // RU start should trigger rollback image in standbycheckpointer
+      nn1.getRpcServer().rollingUpgrade(HdfsConstants.RollingUpgradeAction.PREPARE);
+      HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
+      Assert.assertTrue(nn2.getNamesystem().isNeedRollbackFsImage());
+
+      // RU finalize should reset rollback image flag in standbycheckpointer
+      nn1.getRpcServer().rollingUpgrade(HdfsConstants.RollingUpgradeAction.FINALIZE);
+      HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
+      Assert.assertFalse(nn2.getNamesystem().isNeedRollbackFsImage());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * In non-HA setup, after rolling upgrade prepare, the Secondary NN should
    * still be able to do checkpoint

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java

Lines changed: 2 additions & 0 deletions

@@ -331,6 +331,8 @@ void buildPackage()
     LOG.info("Compressing tarball");
     try (TarArchiveOutputStream out = new TarArchiveOutputStream(
         targetStream)) {
+      // Workaround for the compress issue present from 1.21: COMPRESS-587
+      out.setBigNumberMode(TarArchiveOutputStream.BIGNUMBER_STAR);
       for (String fullPath : filteredInputFiles) {
         LOG.info("Adding " + fullPath);
         File file = new File(fullPath);
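
For context: per the inline comment, commons-compress 1.21 introduced an issue (COMPRESS-587) with numeric tar header fields beyond the classic octal limits, and writing STAR-style big-number headers sidesteps it. A self-contained sketch of the same workaround in isolation (output name and input path are placeholders):

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
import org.apache.commons.compress.utils.IOUtils;

public class TarWithBigNumbers {
  public static void main(String[] args) throws Exception {
    // Placeholder input; pass any existing file as args[0].
    File input = new File(args[0]);
    try (TarArchiveOutputStream out = new TarArchiveOutputStream(
        new BufferedOutputStream(new FileOutputStream("framework.tar")))) {
      // Same call as the patch: permit large numeric fields via
      // STAR-style big-number headers instead of failing.
      out.setBigNumberMode(TarArchiveOutputStream.BIGNUMBER_STAR);
      TarArchiveEntry entry = new TarArchiveEntry(input, input.getName());
      out.putArchiveEntry(entry);
      try (FileInputStream in = new FileInputStream(input)) {
        IOUtils.copy(in, out);
      }
      out.closeArchiveEntry();
    }
  }
}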

hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java

Lines changed: 1 addition & 1 deletion

@@ -106,7 +106,7 @@ public final class FileSystemConfigurations {
   public static final boolean DEFAULT_ABFS_LATENCY_TRACK = false;
   public static final long DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS = 120;
 
-  public static final boolean DEFAULT_ENABLE_READAHEAD = true;
+  public static final boolean DEFAULT_ENABLE_READAHEAD = false;
   public static final String DEFAULT_FS_AZURE_USER_AGENT_PREFIX = EMPTY_STRING;
   public static final String DEFAULT_VALUE_UNKNOWN = "UNKNOWN";

hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamContext.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ public class AbfsInputStreamContext extends AbfsStreamContext {
3535

3636
private boolean tolerateOobAppends;
3737

38-
private boolean isReadAheadEnabled = true;
38+
private boolean isReadAheadEnabled = false;
3939

4040
private boolean alwaysReadBufferSize;
4141

hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractSeek.java

Lines changed: 2 additions & 0 deletions

@@ -34,6 +34,7 @@
 
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_READ_AHEAD_RANGE;
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_READ_BUFFER_SIZE;
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ENABLE_READAHEAD;
 import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MIN_BUFFER_SIZE;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;

@@ -68,6 +69,7 @@ protected Configuration createConfiguration() {
   protected AbstractFSContract createContract(final Configuration conf) {
     conf.setInt(AZURE_READ_AHEAD_RANGE, MIN_BUFFER_SIZE);
     conf.setInt(AZURE_READ_BUFFER_SIZE, MIN_BUFFER_SIZE);
+    conf.setBoolean(FS_AZURE_ENABLE_READAHEAD, true);
     return new AbfsFileSystemContract(conf, isSecure);
   }

hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java

Lines changed: 2 additions & 0 deletions

@@ -106,6 +106,7 @@ private AbfsClient getMockAbfsClient() {
   private AbfsInputStream getAbfsInputStream(AbfsClient mockAbfsClient,
       String fileName) throws IOException {
     AbfsInputStreamContext inputStreamContext = new AbfsInputStreamContext(-1);
+    inputStreamContext.isReadAheadEnabled(true);
     // Create AbfsInputStream with the client instance
     AbfsInputStream inputStream = new AbfsInputStream(
         mockAbfsClient,

@@ -131,6 +132,7 @@ public AbfsInputStream getAbfsInputStream(AbfsClient abfsClient,
       boolean alwaysReadBufferSize,
       int readAheadBlockSize) throws IOException {
     AbfsInputStreamContext inputStreamContext = new AbfsInputStreamContext(-1);
+    inputStreamContext.isReadAheadEnabled(true);
     // Create AbfsInputStream with the client instance
     AbfsInputStream inputStream = new AbfsInputStream(
         abfsClient,
