Skip to content

Commit 892fa48

Browse files
prasad-acitshvachko
authored and committed
HDFS-16141. [FGL] Address permission related issues with File / Directory. Contributed by Renukaprasad C. (#3205)
1 parent 88484fc commit 892fa48

File tree

7 files changed

+76
-11
lines changed

7 files changed

+76
-11
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java

Lines changed: 14 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -70,28 +70,38 @@ static FileStatus mkdirs(FSNamesystem fsn, FSPermissionChecker pc, String src,
7070
// create multiple inodes.
7171
fsn.checkFsObjectLimit();
7272

73-
iip = createMissingDirs(fsd, iip, permissions);
73+
iip = createMissingDirs(fsd, iip, permissions, false);
7474
}
7575
return fsd.getAuditFileInfo(iip);
7676
} finally {
7777
fsd.writeUnlock();
7878
}
7979
}
8080

81-
static INodesInPath createMissingDirs(FSDirectory fsd,
82-
INodesInPath iip, PermissionStatus permissions) throws IOException {
81+
static INodesInPath createMissingDirs(FSDirectory fsd, INodesInPath iip,
82+
PermissionStatus permissions, boolean inheritPerms) throws IOException {
83+
PermissionStatus basePerm = inheritPerms ?
84+
iip.getExistingINodes().getLastINode().getPermissionStatus() :
85+
permissions;
8386
// create all missing directories along the path,
8487
// but don't add them to the INodeMap yet
85-
permissions = addImplicitUwx(permissions, permissions); // SHV !!!
88+
permissions = addImplicitUwx(basePerm, permissions);
8689
INode[] missing = createPathDirectories(fsd, iip, permissions);
8790
iip = iip.getExistingINodes();
8891
if (missing.length == 0) {
8992
return iip;
9093
}
9194
// switch the locks
9295
fsd.getINodeMap().latchWriteLock(iip, missing);
96+
int counter = 0;
9397
// Add missing inodes to the INodeMap
9498
for (INode dir : missing) {
99+
if (counter++ == missing.length - 1) {
100+
//Last folder in the path, use the user given permission
101+
//For MKDIR - refers to the permission given by the user
102+
//For create - refers to the parent directory permission.
103+
permissions = basePerm;
104+
}
95105
iip = addSingleDirectory(fsd, iip, dir, permissions);
96106
assert iip != null : "iip should not be null";
97107
}
@@ -279,13 +289,10 @@ private static INode[] createPathDirectories(FSDirectory fsd,
279289
// create the missing directories along the path
280290
INode[] missing = new INode[numMissing];
281291
final int last = iip.length();
282-
INode parent = existing.getLastINode();
283292
for (int i = existing.length(); i < last; i++) {
284293
byte[] component = iip.getPathComponent(i);
285294
missing[i - existing.length()] =
286295
createDirectoryINode(fsd, existing, component, perm);
287-
missing[i - existing.length()].setParent(parent.asDirectory());
288-
parent = missing[i - existing.length()];
289296
}
290297
return missing;
291298
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -400,7 +400,7 @@ static HdfsFileStatus startFile(
400400
fsn.checkFsObjectLimit();
401401
INodeFile newNode = null;
402402
INodesInPath parent = FSDirMkdirOp.createMissingDirs(fsd,
403-
iip.getParentINodesInPath(), permissions);
403+
iip.getParentINodesInPath(), permissions, true);
404404
if (parent != null) {
405405
iip = addFile(fsd, parent, iip.getLastLocalName(), permissions,
406406
replication, blockSize, holder, clientMachine, shouldReplicate,

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1787,7 +1787,7 @@ public void writeUnlock(String opName,
17871787
@Override
17881788
public boolean hasWriteLock() {
17891789
return this.fsLock.isWriteLockedByCurrentThread() ||
1790-
fsLock.haswWriteChildLock();
1790+
fsLock.hasWriteChildLock();
17911791
}
17921792
@Override
17931793
public boolean hasReadLock() {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ boolean removeChildLock(INodeMapLock lock) {
149149
return partitionLocks.get().remove(lock);
150150
}
151151

152-
boolean haswWriteChildLock() {
152+
boolean hasWriteChildLock() {
153153
Iterator<INodeMapLock> iter = partitionLocks.get().iterator();
154154
// FSNamesystem.LOG.debug("partitionLocks.size = {}", partitionLocks.get().size());
155155
while(iter.hasNext()) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,8 @@ protected void readChildUnlock() {
153153

154154
@Override
155155
protected boolean hasWriteChildLock() {
156-
return this.childLock.isWriteLockedByCurrentThread();
156+
return this.childLock.isWriteLockedByCurrentThread() || namesystem
157+
.getFSLock().hasWriteChildLock();
157158
}
158159

159160
@Override

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,8 @@
2525
import org.apache.hadoop.conf.Configuration;
2626
import org.apache.hadoop.fs.FileSystem;
2727
import org.apache.hadoop.fs.InvalidPathException;
28+
import org.apache.hadoop.fs.LocatedFileStatus;
29+
import org.apache.hadoop.fs.RemoteIterator;
2830
import org.apache.hadoop.fs.ParentNotDirectoryException;
2931
import org.apache.hadoop.fs.Path;
3032
import org.apache.hadoop.fs.permission.FsPermission;
@@ -152,4 +154,55 @@ public void testMkdirRpcNonCanonicalPath() throws IOException {
152154
cluster.shutdown();
153155
}
154156
}
157+
158+
@Test
159+
public void testMkDirsWithRestart() throws IOException {
160+
MiniDFSCluster cluster =
161+
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
162+
DistributedFileSystem dfs = cluster.getFileSystem();
163+
try {
164+
Path dir1 = new Path("/mkdir-1");
165+
Path file1 = new Path(dir1, "file1");
166+
Path deleteDir = new Path("/deleteDir");
167+
Path deleteFile = new Path(dir1, "deleteFile");
168+
// Create a dir in root dir, should succeed
169+
assertTrue(dfs.mkdir(dir1, FsPermission.getDefault()));
170+
dfs.mkdir(deleteDir, FsPermission.getDefault());
171+
assertTrue(dfs.exists(deleteDir));
172+
dfs.delete(deleteDir, true);
173+
assertTrue(!dfs.exists(deleteDir));
174+
175+
DFSTestUtil.writeFile(dfs, file1, "hello world");
176+
DFSTestUtil.writeFile(dfs, deleteFile, "hello world");
177+
int totalFiles = getFileCount(dfs);
178+
//Before deletion there are 2 files
179+
assertTrue("Incorrect file count", 2 == totalFiles);
180+
dfs.delete(deleteFile, false);
181+
totalFiles = getFileCount(dfs);
182+
//After deletion, left with 1 file
183+
assertTrue("Incorrect file count", 1 == totalFiles);
184+
185+
cluster.restartNameNodes();
186+
dfs = cluster.getFileSystem();
187+
assertTrue(dfs.exists(dir1));
188+
assertTrue(!dfs.exists(deleteDir));
189+
assertTrue(dfs.exists(file1));
190+
totalFiles = getFileCount(dfs);
191+
assertTrue("Incorrect file count", 1 == totalFiles);
192+
} finally {
193+
dfs.close();
194+
cluster.shutdown();
195+
}
196+
}
197+
198+
private int getFileCount(DistributedFileSystem dfs) throws IOException {
199+
RemoteIterator<LocatedFileStatus> fileItr =
200+
dfs.listFiles(new Path("/"), true);
201+
int totalFiles = 0;
202+
while (fileItr.hasNext()) {
203+
fileItr.next();
204+
totalFiles++;
205+
}
206+
return totalFiles;
207+
}
155208
}

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1313,6 +1313,10 @@ public void testFileIdMismatch() throws IOException {
13131313
fail();
13141314
} catch(FileNotFoundException e) {
13151315
FileSystem.LOG.info("Caught Expected FileNotFoundException: ", e);
1316+
} catch (AssertionError ae) {
1317+
//FSDirWriteFileOp#completeFile throws AssertError if the given
1318+
// id/node is not an instance of INodeFile.
1319+
FileSystem.LOG.info("Caught Expected AssertionError: ", ae);
13161320
}
13171321
} finally {
13181322
IOUtils.closeStream(dfs);

0 commit comments

Comments (0)