
Commit 43b41f2

Author: Colin Patrick McCabe
HDFS-7929. inotify unable fetch pre-upgrade edit log segments once upgrade starts (Zhe Zhang via Colin P. McCabe)
1 parent bee5a6a commit 43b41f2
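
Background: during doPreUpgrade, each storage directory's current/ is renamed to previous.tmp and a fresh, empty current/ is created, so edit log segments written before the upgrade disappeared from the active edits directory; an inotify reader positioned at a pre-upgrade transaction ID could then no longer fetch them. This commit copies those segments forward into the new current/ so they remain servable. For context, a minimal sketch of the kind of client this affects, assuming the 2.7-era inotify API; the URI, port, and starting transaction ID are placeholders, not values from this commit:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

public class InotifyTail {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode URI for illustration.
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://localhost:8020"), conf);
    // Resume from a transaction id recorded earlier; if that txid falls in a
    // pre-upgrade segment, the NameNode must still be able to serve it after
    // an upgrade starts -- the situation this commit fixes.
    DFSInotifyEventInputStream stream = admin.getInotifyEventStream(0L);
    while (true) {
      EventBatch batch = stream.take(); // blocks until events are available
      for (Event event : batch.getEvents()) {
        System.out.println(event.getEventType());
      }
    }
  }
}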

5 files changed: 92 additions, 6 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 3 additions & 0 deletions
@@ -1202,6 +1202,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7943. Append cannot handle the last block with length greater than
     the preferred block size. (jing9)
 
+    HDFS-7929. inotify unable fetch pre-upgrade edit log segments once upgrade
+    starts (Zhe Zhang via Colin P. McCabe)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
     HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

Lines changed: 1 addition & 1 deletion
@@ -406,7 +406,7 @@ void doUpgrade(FSNamesystem target) throws IOException {
     for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
       StorageDirectory sd = it.next();
       try {
-        NNUpgradeUtil.doPreUpgrade(sd);
+        NNUpgradeUtil.doPreUpgrade(conf, sd);
       } catch (Exception e) {
         LOG.error("Failed to move aside pre-upgrade storage " +
             "in image directory " + sd.getRoot(), e);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java

Lines changed: 1 addition & 1 deletion
@@ -581,7 +581,7 @@ public String toString() {
   public void doPreUpgrade() throws IOException {
     LOG.info("Starting upgrade of edits directory " + sd.getRoot());
     try {
-      NNUpgradeUtil.doPreUpgrade(sd);
+      NNUpgradeUtil.doPreUpgrade(conf, sd);
     } catch (IOException ioe) {
       LOG.error("Failed to move aside pre-upgrade storage " +
           "in image directory " + sd.getRoot(), ioe);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java

Lines changed: 38 additions & 3 deletions
@@ -18,15 +18,19 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.File;
+import java.io.FilenameFilter;
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.io.IOUtils;
 
 abstract class NNUpgradeUtil {
 
@@ -99,15 +103,17 @@ static void doFinalize(StorageDirectory sd) throws IOException {
    * a call to any JM's or local storage dir's doPreUpgrade method fails, then
    * doUpgrade will not be called for any JM. The existing current dir is
    * renamed to previous.tmp, and then a new, empty current dir is created.
-   *
+   *
+   * @param conf configuration for creating {@link EditLogFileOutputStream}
    * @param sd the storage directory to perform the pre-upgrade procedure.
    * @throws IOException in the event of error
    */
-  static void doPreUpgrade(StorageDirectory sd) throws IOException {
+  static void doPreUpgrade(Configuration conf, StorageDirectory sd)
+      throws IOException {
     LOG.info("Starting upgrade of storage directory " + sd.getRoot());
     File curDir = sd.getCurrentDir();
     File prevDir = sd.getPreviousDir();
-    File tmpDir = sd.getPreviousTmp();
+    final File tmpDir = sd.getPreviousTmp();
 
     Preconditions.checkState(curDir.exists(),
         "Current directory must exist for preupgrade.");
@@ -123,6 +129,35 @@ static void doPreUpgrade(StorageDirectory sd) throws IOException {
     if (!curDir.mkdir()) {
       throw new IOException("Cannot create directory " + curDir);
     }
+
+    List<String> fileNameList = IOUtils.listDirectory(tmpDir, new FilenameFilter() {
+      @Override
+      public boolean accept(File dir, String name) {
+        return dir.equals(tmpDir)
+            && name.startsWith(NNStorage.NameNodeFile.EDITS.getName());
+      }
+    });
+
+    for (String s : fileNameList) {
+      File prevFile = new File(tmpDir, s);
+      Preconditions.checkState(prevFile.canRead(),
+          "Edits log file " + s + " is not readable.");
+      File newFile = new File(curDir, prevFile.getName());
+      Preconditions.checkState(newFile.createNewFile(),
+          "Cannot create new edits log file in " + curDir);
+      EditLogFileInputStream in = new EditLogFileInputStream(prevFile);
+      EditLogFileOutputStream out =
+          new EditLogFileOutputStream(conf, newFile, 512*1024);
+      FSEditLogOp logOp = in.nextValidOp();
+      while (logOp != null) {
+        out.write(logOp);
+        logOp = in.nextOp();
+      }
+      out.setReadyToFlush();
+      out.flushAndSync(true);
+      out.close();
+      in.close();
+    }
   }
 
   /**
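
In short: after the rename to previous.tmp, doPreUpgrade now lists every file there whose name carries the edits prefix and replays its ops into a fresh copy under the new current/ via EditLogFileOutputStream, which is what keeps pre-upgrade segments readable by inotify. Below is a toy sketch of the prefix filter in isolation; the literal "edits" prefix and the sample segment names are assumptions about the usual on-disk naming, standing in for NNStorage.NameNodeFile.EDITS.getName():

import java.io.File;
import java.io.FilenameFilter;

public class EditsFilterDemo {
  public static void main(String[] args) {
    // Same prefix test as the commit's filter; "edits" stands in for
    // NNStorage.NameNodeFile.EDITS.getName().
    FilenameFilter editsOnly = new FilenameFilter() {
      @Override
      public boolean accept(File dir, String name) {
        return name.startsWith("edits");
      }
    };
    // Sample names follow the common edits-directory naming convention
    // (an illustrative assumption, not content from the commit).
    String[] samples = {
        "edits_0000000000000000001-0000000000000000012",
        "edits_inprogress_0000000000000000013",
        "fsimage_0000000000000000012",
        "VERSION"
    };
    for (String name : samples) {
      System.out.println(name + " -> " + editsOnly.accept(new File("."), name));
    }
  }
}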

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java

Lines changed: 49 additions & 1 deletion
@@ -28,7 +28,10 @@
 import static org.junit.Assert.fail;
 
 import java.io.File;
+import java.io.FilenameFilter;
 import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
@@ -42,7 +45,9 @@
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.BeforeClass;
@@ -450,7 +455,50 @@ public void test203LayoutVersion() {
       assertTrue(Storage.is203LayoutVersion(lv));
     }
   }
-
+
+  @Test
+  public void testPreserveEditLogs() throws Exception {
+    conf = new HdfsConfiguration();
+    conf = UpgradeUtilities.initializeStorageStateConf(1, conf);
+    String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
+
+    log("Normal NameNode upgrade", 1);
+    File[] created =
+        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
+    List<String> beforeUpgrade = new LinkedList<>();
+    for (final File createdDir : created) {
+      List<String> fileNameList =
+          IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
+      beforeUpgrade.addAll(fileNameList);
+    }
+
+    cluster = createCluster();
+
+    List<String> afterUpgrade = new LinkedList<>();
+    for (final File createdDir : created) {
+      List<String> fileNameList =
+          IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
+      afterUpgrade.addAll(fileNameList);
+    }
+
+    for (String s : beforeUpgrade) {
+      assertTrue(afterUpgrade.contains(s));
+    }
+
+    cluster.shutdown();
+    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+  }
+
+  private static enum EditLogsFilter implements FilenameFilter {
+    INSTANCE;
+
+    @Override
+    public boolean accept(File dir, String name) {
+      return name.startsWith(NNStorage.NameNodeFile.EDITS.getName());
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     TestDFSUpgrade t = new TestDFSUpgrade();
     TestDFSUpgrade.initialize();
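
To run just the new regression test, the standard Surefire filter should work from hadoop-hdfs-project/hadoop-hdfs, e.g. mvn test -Dtest=TestDFSUpgrade (whether the method-level form -Dtest=TestDFSUpgrade#testPreserveEditLogs is accepted depends on the Surefire version in use).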
