Skip to content

Commit 6dcc795

Browse files
committed
HDFS-7943. Append cannot handle the last block with length greater than the preferred block size. Contributed by Jing Zhao.
(cherry picked from commit bee5a6a)
1 parent 79c07bb commit 6dcc795

File tree

3 files changed

+44
-2
lines changed

3 files changed

+44
-2
lines changed

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

+3
Original file line number | Diff line number | Diff line change
@@ -896,6 +896,9 @@ Release 2.7.0 - UNRELEASED
896896
HDFS-7945. The WebHdfs system on DN does not honor the length parameter.
897897
(wheat9)
898898

899+
HDFS-7943. Append cannot handle the last block with length greater than
900+
the preferred block size. (jing9)
901+
899902
BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
900903

901904
HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java

+24-2
Original file line number | Diff line number | Diff line change
@@ -34,6 +34,16 @@
3434

3535
import static org.apache.hadoop.util.Time.now;
3636

37+
/**
38+
* Restrictions for a concat operation:
39+
* <pre>
40+
* 1. the src file and the target file are in the same dir
41+
* 2. all the source files are not in snapshot
42+
* 3. any source file cannot be the same with the target file
43+
* 4. source files cannot be under construction or empty
44+
* 5. source file's preferred block size cannot be greater than the target file
45+
* </pre>
46+
*/
3747
class FSDirConcatOp {
3848

3949
static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
@@ -123,14 +133,25 @@ private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
123133
throw new SnapshotException("Concat: the source file " + src
124134
+ " is referred by some other reference in some snapshot.");
125135
}
136+
// source file cannot be the same with the target file
126137
if (srcINode == targetINode) {
127138
throw new HadoopIllegalArgumentException("concat: the src file " + src
128139
+ " is the same with the target file " + targetIIP.getPath());
129140
}
141+
// source file cannot be under construction or empty
130142
if(srcINodeFile.isUnderConstruction() || srcINodeFile.numBlocks() == 0) {
131143
throw new HadoopIllegalArgumentException("concat: source file " + src
132144
+ " is invalid or empty or underConstruction");
133145
}
146+
// source file's preferred block size cannot be greater than the target
147+
// file
148+
if (srcINodeFile.getPreferredBlockSize() >
149+
targetINode.getPreferredBlockSize()) {
150+
throw new HadoopIllegalArgumentException("concat: source file " + src
151+
+ " has preferred block size " + srcINodeFile.getPreferredBlockSize()
152+
+ " which is greater than the target file's preferred block size "
153+
+ targetINode.getPreferredBlockSize());
154+
}
134155
si.add(srcINodeFile);
135156
}
136157

@@ -143,9 +164,10 @@ private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
143164
return si.toArray(new INodeFile[si.size()]);
144165
}
145166

146-
private static QuotaCounts computeQuotaDeltas(FSDirectory fsd, INodeFile target, INodeFile[] srcList) {
167+
private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
168+
INodeFile target, INodeFile[] srcList) {
147169
QuotaCounts deltas = new QuotaCounts.Builder().build();
148-
short targetRepl = target.getBlockReplication();
170+
final short targetRepl = target.getBlockReplication();
149171
for (INodeFile src : srcList) {
150172
short srcRepl = src.getBlockReplication();
151173
long fileSize = src.computeFileSize();

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java

+17
Original file line number | Diff line number | Diff line change
@@ -44,6 +44,7 @@
4444
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
4545
import org.apache.hadoop.ipc.RemoteException;
4646
import org.apache.hadoop.security.UserGroupInformation;
47+
import org.apache.hadoop.test.GenericTestUtils;
4748
import org.junit.After;
4849
import org.junit.Assert;
4950
import org.junit.Before;
@@ -388,6 +389,22 @@ public void testIllegalArg() throws IOException {
388389
} catch (Exception e) {
389390
// expected
390391
}
392+
393+
// the source file's preferred block size cannot be greater than the target
394+
{
395+
final Path src1 = new Path(parentDir, "src1");
396+
DFSTestUtil.createFile(dfs, src1, fileLen, REPL_FACTOR, 0L);
397+
final Path src2 = new Path(parentDir, "src2");
398+
// create a file whose preferred block size is greater than the target
399+
DFSTestUtil.createFile(dfs, src2, 1024, fileLen,
400+
dfs.getDefaultBlockSize(trg) * 2, REPL_FACTOR, 0L);
401+
try {
402+
dfs.concat(trg, new Path[] {src1, src2});
403+
fail("didn't fail for src with greater preferred block size");
404+
} catch (Exception e) {
405+
GenericTestUtils.assertExceptionContains("preferred block size", e);
406+
}
407+
}
391408
}
392409

393410
/**

0 commit comments

Comments
 (0)