Skip to content

Commit 08409fc

Browse files
author
Haohui Mai
committed
HDFS-6304. Consolidate the logic of path resolution in FSDirectory. Contributed by Haohui Mai.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1591411 13f79535-47bb-0310-9956-ffa450edef68
1 parent 7a9a29e commit 08409fc

File tree

9 files changed

+120
-101
lines changed

9 files changed

+120
-101
lines changed

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -332,6 +332,9 @@ Release 2.5.0 - UNRELEASED
332332
HDFS-6269. NameNode Audit Log should differentiate between webHDFS open and
333333
HDFS open. (Eric Payne via jeagles)
334334

335+
HDFS-6304. Consolidate the logic of path resolution in FSDirectory.
336+
(wheat9)
337+
335338
OPTIMIZATIONS
336339

337340
HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

Lines changed: 91 additions & 42 deletions
Large diffs are not rendered by default.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -494,7 +494,7 @@ private int loadDirectory(DataInput in, Counter counter) throws IOException {
494494
// Rename .snapshot paths if we're doing an upgrade
495495
parentPath = renameReservedPathsOnUpgrade(parentPath, getLayoutVersion());
496496
final INodeDirectory parent = INodeDirectory.valueOf(
497-
namesystem.dir.rootDir.getNode(parentPath, true), parentPath);
497+
namesystem.dir.getNode(parentPath, true), parentPath);
498498
return loadChildren(parent, in, counter);
499499
}
500500

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5507,7 +5507,7 @@ private void checkPermission(FSPermissionChecker pc,
55075507
dir.waitForReady();
55085508
readLock();
55095509
try {
5510-
pc.checkPermission(path, dir.rootDir, doCheckOwner, ancestorAccess,
5510+
pc.checkPermission(path, dir, doCheckOwner, ancestorAccess,
55115511
parentAccess, access, subAccess, resolveLink);
55125512
} finally {
55135513
readUnlock();

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -144,7 +144,7 @@ public void checkSuperuserPrivilege()
144144
* Guarded by {@link FSNamesystem#readLock()}
145145
* Caller of this method must hold that lock.
146146
*/
147-
void checkPermission(String path, INodeDirectory root, boolean doCheckOwner,
147+
void checkPermission(String path, FSDirectory dir, boolean doCheckOwner,
148148
FsAction ancestorAccess, FsAction parentAccess, FsAction access,
149149
FsAction subAccess, boolean resolveLink)
150150
throws AccessControlException, UnresolvedLinkException {
@@ -159,7 +159,7 @@ void checkPermission(String path, INodeDirectory root, boolean doCheckOwner,
159159
}
160160
// check if (parentAccess != null) && file exists, then check sb
161161
// If resolveLink, the check is performed on the link target.
162-
final INodesInPath inodesInPath = root.getINodesInPath(path, resolveLink);
162+
final INodesInPath inodesInPath = dir.getINodesInPath(path, resolveLink);
163163
final int snapshotId = inodesInPath.getPathSnapshotId();
164164
final INode[] inodes = inodesInPath.getINodes();
165165
int ancestorIndex = inodes.length - 2;

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

Lines changed: 0 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -403,53 +403,6 @@ private ReadOnlyList<INode> getCurrentChildrenList() {
403403
: ReadOnlyList.Util.asReadOnlyList(children);
404404
}
405405

406-
/** @return the {@link INodesInPath} containing only the last inode. */
407-
INodesInPath getLastINodeInPath(String path, boolean resolveLink
408-
) throws UnresolvedLinkException {
409-
return INodesInPath.resolve(this, getPathComponents(path), 1, resolveLink);
410-
}
411-
412-
/** @return the {@link INodesInPath} containing all inodes in the path. */
413-
INodesInPath getINodesInPath(String path, boolean resolveLink
414-
) throws UnresolvedLinkException {
415-
final byte[][] components = getPathComponents(path);
416-
return INodesInPath.resolve(this, components, components.length, resolveLink);
417-
}
418-
419-
/** @return the last inode in the path. */
420-
INode getNode(String path, boolean resolveLink)
421-
throws UnresolvedLinkException {
422-
return getLastINodeInPath(path, resolveLink).getINode(0);
423-
}
424-
425-
/**
426-
* @return the INode of the last component in src, or null if the last
427-
* component does not exist.
428-
* @throws UnresolvedLinkException if symlink can't be resolved
429-
* @throws SnapshotAccessControlException if path is in RO snapshot
430-
*/
431-
INode getINode4Write(String src, boolean resolveLink)
432-
throws UnresolvedLinkException, SnapshotAccessControlException {
433-
return getINodesInPath4Write(src, resolveLink).getLastINode();
434-
}
435-
436-
/**
437-
* @return the INodesInPath of the components in src
438-
* @throws UnresolvedLinkException if symlink can't be resolved
439-
* @throws SnapshotAccessControlException if path is in RO snapshot
440-
*/
441-
INodesInPath getINodesInPath4Write(String src, boolean resolveLink)
442-
throws UnresolvedLinkException, SnapshotAccessControlException {
443-
final byte[][] components = INode.getPathComponents(src);
444-
INodesInPath inodesInPath = INodesInPath.resolve(this, components,
445-
components.length, resolveLink);
446-
if (inodesInPath.isSnapshot()) {
447-
throw new SnapshotAccessControlException(
448-
"Modification on a read-only snapshot is disallowed");
449-
}
450-
return inodesInPath;
451-
}
452-
453406
/**
454407
* Given a child's name, return the index of the next child
455408
*

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1306,7 +1306,7 @@ private static void assertAclFeature(boolean expectAclFeature)
13061306
*/
13071307
private static void assertAclFeature(Path pathToCheck,
13081308
boolean expectAclFeature) throws IOException {
1309-
INode inode = cluster.getNamesystem().getFSDirectory().getRoot()
1309+
INode inode = cluster.getNamesystem().getFSDirectory()
13101310
.getNode(pathToCheck.toUri().getPath(), false);
13111311
assertNotNull(inode);
13121312
AclFeature aclFeature = inode.getAclFeature();

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
import java.io.IOException;
2727
import java.util.Arrays;
2828

29+
import org.apache.hadoop.conf.Configuration;
2930
import org.junit.Before;
3031
import org.junit.Test;
3132

@@ -38,7 +39,10 @@
3839
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
3940
import org.apache.hadoop.security.AccessControlException;
4041
import org.apache.hadoop.security.UserGroupInformation;
42+
import org.mockito.invocation.InvocationOnMock;
43+
import org.mockito.stubbing.Answer;
4144

45+
import static org.mockito.Mockito.*;
4246
/**
4347
* Unit tests covering FSPermissionChecker. All tests in this suite have been
4448
* cross-validated against Linux setfacl/getfacl to check for consistency of the
@@ -56,14 +60,24 @@ public class TestFSPermissionChecker {
5660
private static final UserGroupInformation CLARK =
5761
UserGroupInformation.createUserForTesting("clark", new String[] { "execs" });
5862

63+
private FSDirectory dir;
5964
private INodeDirectory inodeRoot;
6065

6166
@Before
6267
public void setUp() {
63-
PermissionStatus permStatus = PermissionStatus.createImmutable(SUPERUSER,
64-
SUPERGROUP, FsPermission.createImmutable((short)0755));
65-
inodeRoot = new INodeDirectory(INodeId.ROOT_INODE_ID,
66-
INodeDirectory.ROOT_NAME, permStatus, 0L);
68+
Configuration conf = new Configuration();
69+
FSNamesystem fsn = mock(FSNamesystem.class);
70+
doAnswer(new Answer() {
71+
@Override
72+
public Object answer(InvocationOnMock invocation) throws Throwable {
73+
Object[] args = invocation.getArguments();
74+
FsPermission perm = (FsPermission) args[0];
75+
return new PermissionStatus(SUPERUSER, SUPERGROUP, perm);
76+
}
77+
}).when(fsn).createFsOwnerPermissions(any(FsPermission.class));
78+
FSImage image = mock(FSImage.class);
79+
dir = new FSDirectory(image, fsn, conf);
80+
inodeRoot = dir.getRoot();
6781
}
6882

6983
@Test
@@ -379,14 +393,14 @@ private void addAcl(INodeWithAdditionalFields inode, AclEntry... acl)
379393
private void assertPermissionGranted(UserGroupInformation user, String path,
380394
FsAction access) throws IOException {
381395
new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(path,
382-
inodeRoot, false, null, null, access, null, true);
396+
dir, false, null, null, access, null, true);
383397
}
384398

385399
private void assertPermissionDenied(UserGroupInformation user, String path,
386400
FsAction access) throws IOException {
387401
try {
388402
new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(path,
389-
inodeRoot, false, null, null, access, null, true);
403+
dir, false, null, null, access, null, true);
390404
fail("expected AccessControlException for user + " + user + ", path = " +
391405
path + ", access = " + access);
392406
} catch (AccessControlException e) {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -699,7 +699,7 @@ public void testFsckError() throws Exception {
699699
DFSTestUtil.waitReplication(fs, filePath, (short)1);
700700

701701
// intentionally corrupt NN data structure
702-
INodeFile node = (INodeFile)cluster.getNamesystem().dir.rootDir.getNode(
702+
INodeFile node = (INodeFile)cluster.getNamesystem().dir.getNode(
703703
fileName, true);
704704
final BlockInfo[] blocks = node.getBlocks();
705705
assertEquals(blocks.length, 1);

0 commit comments

Comments (0)