
Commit 0dbe1d3

HADOOP-18668. Path capability probe for truncate is only honored by RawLocalFileSystem (#5492)
1 parent 9a8287c commit 0dbe1d3

7 files changed, 42 insertions(+), 0 deletions(-)
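
In short, the commit makes the shared DfsPathCapabilities probe on the HDFS client side, the HttpFS client, and the ViewFS variants answer true for CommonPathCapabilities.FS_TRUNCATE, so a capability probe no longer succeeds only on RawLocalFileSystem even though truncate itself works on these filesystems. A minimal client-side sketch of the pattern this enables follows; it is not part of the commit, and the class name TruncateProbe, the helper truncateIfSupported, and the /tmp/sample.txt path are purely illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TruncateProbe {

  // Truncate 'file' to 'newLength' only when the filesystem advertises the
  // truncate capability for that path. Returns true if the truncate completed
  // synchronously; HDFS may return false and finish block recovery asynchronously.
  public static boolean truncateIfSupported(FileSystem fs, Path file, long newLength)
      throws IOException {
    if (!fs.hasPathCapability(file, CommonPathCapabilities.FS_TRUNCATE)) {
      // e.g. a store that cannot truncate in place
      return false;
    }
    return fs.truncate(file, newLength);
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);       // default filesystem from core-site.xml
    Path sample = new Path("/tmp/sample.txt");  // illustrative path
    System.out.println(truncateIfSupported(fs, sample, 0L));
  }
}

Callers that previously had to special-case the filesystem scheme before calling truncate() can rely on this probe once they run against clients that include this change.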

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java

Lines changed: 1 addition & 0 deletions
@@ -53,6 +53,7 @@ public static Optional<Boolean> hasPathCapability(final Path path,
     case CommonPathCapabilities.FS_SNAPSHOTS:
     case CommonPathCapabilities.FS_STORAGEPOLICY:
     case CommonPathCapabilities.FS_XATTRS:
+    case CommonPathCapabilities.FS_TRUNCATE:
       return Optional.of(true);
     case CommonPathCapabilities.FS_SYMLINKS:
       return Optional.of(FileSystem.areSymlinksEnabled());

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java

Lines changed: 1 addition & 0 deletions
@@ -1646,6 +1646,7 @@ public boolean hasPathCapability(final Path path, final String capability)
     case CommonPathCapabilities.FS_SNAPSHOTS:
     case CommonPathCapabilities.FS_STORAGEPOLICY:
     case CommonPathCapabilities.FS_XATTRS:
+    case CommonPathCapabilities.FS_TRUNCATE:
       return true;
     case CommonPathCapabilities.FS_SYMLINKS:
       return false;

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java

Lines changed: 9 additions & 0 deletions
@@ -22,6 +22,7 @@
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
@@ -302,9 +303,17 @@ private void testTruncate() throws Exception {
       AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
 
       fs.close();
+      assertPathCapabilityForTruncate(file);
     }
   }
 
+  private void assertPathCapabilityForTruncate(Path file) throws Exception {
+    FileSystem fs = this.getHttpFSFileSystem();
+    assertTrue("HttpFS/WebHdfs/SWebHdfs support truncate",
+        fs.hasPathCapability(file, CommonPathCapabilities.FS_TRUNCATE));
+    fs.close();
+  }
+
   private void testConcat() throws Exception {
     Configuration config = getProxiedFSConf();
     config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java

Lines changed: 3 additions & 0 deletions
@@ -22,6 +22,7 @@
 
 import java.util.function.Supplier;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
@@ -103,6 +104,8 @@ public void testTruncateWithViewFileSystem()
     out.writeBytes("drtatedasfdasfgdfas");
     out.close();
     int newLength = 10;
+    assertTrue("ViewFS supports truncate",
+        fsView.hasPathCapability(filePath, CommonPathCapabilities.FS_TRUNCATE));
     boolean isReady = fsView.truncate(filePath, newLength);
     if (!isReady) {
       GenericTestUtils.waitFor(new Supplier<Boolean>() {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystem.java

Lines changed: 18 additions & 0 deletions
@@ -19,6 +19,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
@@ -191,4 +192,21 @@ public void testQuota() throws IOException {
       }
     }
   }
+
+  @Test
+  public void testPathCapabilities() throws IOException {
+    Configuration conf = getTestConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
+      URI defaultUri = URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));
+      conf.set("fs.viewfs.mounttable." + defaultUri.getHost() + ".linkFallback",
+          defaultUri.toString());
+      try (ViewDistributedFileSystem fileSystem = (ViewDistributedFileSystem) FileSystem.get(
+          conf)) {
+        final Path testFile = new Path("/test");
+        assertTrue("ViewDfs supports truncate",
+            fileSystem.hasPathCapability(testFile, CommonPathCapabilities.FS_TRUNCATE));
+      }
+    }
+  }
+
 }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java

Lines changed: 7 additions & 0 deletions
@@ -33,6 +33,7 @@
 import java.io.IOException;
 import java.util.concurrent.ThreadLocalRandom;
 
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.ipc.RemoteException;
@@ -143,6 +144,8 @@ public void testBasicTruncate() throws IOException {
       writeContents(contents, fileLength, p);
 
       int newLength = fileLength - toTruncate;
+      assertTrue("DFS supports truncate",
+          fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE));
       boolean isReady = fs.truncate(p, newLength);
       LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
           + ", toTruncate=" + toTruncate + ", isReady=" + isReady);
@@ -176,6 +179,8 @@ public void testMultipleTruncate() throws IOException {
 
     for(int n = data.length; n > 0; ) {
       final int newLength = ThreadLocalRandom.current().nextInt(n);
+      assertTrue("DFS supports truncate",
+          fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE));
       final boolean isReady = fs.truncate(p, newLength);
       LOG.info("newLength=" + newLength + ", isReady=" + isReady);
       assertEquals("File must be closed for truncating at the block boundary",
@@ -209,6 +214,8 @@ public void testSnapshotTruncateThenDeleteSnapshot() throws IOException {
     final int newLength = data.length - 1;
     assert newLength % BLOCK_SIZE != 0 :
         " newLength must not be multiple of BLOCK_SIZE";
+    assertTrue("DFS supports truncate",
+        fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE));
     final boolean isReady = fs.truncate(p, newLength);
     LOG.info("newLength=" + newLength + ", isReady=" + isReady);
     assertEquals("File must be closed for truncating at the block boundary",

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithRestCsrfPreventionFilter.java

Lines changed: 3 additions & 0 deletions
@@ -29,6 +29,7 @@
 import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -160,6 +161,8 @@ public void testTruncate() throws Exception {
     if (nnRestCsrf && !clientRestCsrf) {
       expectException();
     }
+    assertTrue("WebHdfs supports truncate",
+        webhdfs.hasPathCapability(FILE, CommonPathCapabilities.FS_TRUNCATE));
     assertTrue(webhdfs.truncate(FILE, 0L));
   }
 
