Commit e08ae01

HDFS-4927. Merging change r1496351 from branch-2 to branch-2.1-beta.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2.1-beta@1496352 13f79535-47bb-0310-9956-ffa450edef68
1 parent 31f7093 commit e08ae01

3 files changed: +123 −7 lines changed

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 3 additions & 0 deletions

@@ -381,6 +381,9 @@ Release 2.1.0-beta - UNRELEASED
 
     HDFS-4205. fsck fails with symlinks. (jlowe)
 
+    HDFS-4927. CreateEditsLog creates inodes with an invalid inode ID, which then
+    cannot be loaded by a namenode. (cnauroth)
+
   BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
 
     HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.

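The CreateEditsLog diff below replaces the reserved INodeId.GRANDFATHER_INODE_ID constant with values drawn from an INodeId sequence (a new INodeId() followed by nextValue() for each inode). As a rough sketch of that pattern, using only standard Java, a minimal sequential generator could look like the following; the class and constant names here are hypothetical stand-ins, not the real Hadoop INodeId implementation.

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical stand-in for a sequential inode-ID source; Hadoop's actual
// INodeId class may differ in naming and in how it handles reserved IDs.
public class SimpleInodeIdGenerator {
  // IDs at or below this value are assumed reserved for special inodes.
  private static final long LAST_RESERVED_ID = 1000L;

  private final AtomicLong lastId = new AtomicLong(LAST_RESERVED_ID);

  /** Returns the next unique inode ID, never reusing a reserved value. */
  public long nextValue() {
    return lastId.incrementAndGet();
  }

  public static void main(String[] args) {
    SimpleInodeIdGenerator ids = new SimpleInodeIdGenerator();
    System.out.println(ids.nextValue()); // 1001
    System.out.println(ids.nextValue()); // 1002 -- every inode gets a distinct ID
  }
}

The property that matters for this fix is simply that every inode written to the edits log receives a distinct, non-reserved ID.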
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java

Lines changed: 8 additions & 7 deletions

@@ -26,6 +26,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
 
 /**
  *
@@ -62,8 +63,9 @@ static void addFiles(FSEditLog editLog, int numFiles, short replication,
 
     PermissionStatus p = new PermissionStatus("joeDoe", "people",
         new FsPermission((short)0777));
-    INodeDirectory dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
-        null, p, 0L);
+    INodeId inodeId = new INodeId();
+    INodeDirectory dirInode = new INodeDirectory(inodeId.nextValue(), null, p,
+        0L);
     editLog.logMkDir(BASE_PATH, dirInode);
     long blockSize = 10;
     BlockInfo[] blocks = new BlockInfo[blocksPerFile];
@@ -82,8 +84,8 @@ static void addFiles(FSEditLog editLog, int numFiles, short replication,
       }
 
       INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
-          INodeId.GRANDFATHER_INODE_ID, null, replication, 0, blockSize,
-          blocks, p, "", "", null);
+          inodeId.nextValue(), null, replication, 0, blockSize, blocks, p, "",
+          "", null);
       // Append path to filename with information about blockIDs
       String path = "_" + iF + "_B" + blocks[0].getBlockId() +
           "_to_B" + blocks[blocksPerFile-1].getBlockId() + "_";
@@ -92,12 +94,11 @@ static void addFiles(FSEditLog editLog, int numFiles, short replication,
       // Log the new sub directory in edits
       if ((iF % nameGenerator.getFilesPerDirectory()) == 0) {
         String currentDir = nameGenerator.getCurrentDir();
-        dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, null, p, 0L);
+        dirInode = new INodeDirectory(inodeId.nextValue(), null, p, 0L);
         editLog.logMkDir(currentDir, dirInode);
       }
       editLog.logOpenFile(filePath, new INodeFileUnderConstruction(
-          INodeId.GRANDFATHER_INODE_ID, p, replication, 0, blockSize, "", "",
-          null));
+          inodeId.nextValue(), p, replication, 0, blockSize, "", "", null));
       editLog.logCloseFile(filePath, inode);
 
       if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks
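The namenode tracks inodes by ID, so an edits log in which every generated inode reuses the same reserved placeholder cannot be loaded, per the HDFS-4927 description above. The toy sketch below is plain Java, not Hadoop's actual edit-log loading code; it only illustrates how reusing one ID collapses otherwise distinct inodes, while unique IDs keep them apart.

import java.util.HashMap;
import java.util.Map;

public class InodeIdCollisionDemo {
  public static void main(String[] args) {
    // Assumed reserved placeholder value, standing in for GRANDFATHER_INODE_ID.
    final long RESERVED_ID = 0L;
    Map<Long, String> inodesById = new HashMap<>();

    // Old behaviour: every inode is written with the reserved ID, so keying
    // by ID collapses them all into a single entry.
    for (int i = 0; i < 3; i++) {
      inodesById.put(RESERVED_ID, "file_" + i);
    }
    System.out.println("distinct inodes with reused ID: " + inodesById.size()); // 1

    // Patched behaviour: each inode gets its own sequential ID, as the
    // modified CreateEditsLog now does via INodeId.nextValue().
    inodesById.clear();
    long nextId = RESERVED_ID;
    for (int i = 0; i < 3; i++) {
      inodesById.put(++nextId, "file_" + i);
    }
    System.out.println("distinct inodes with unique IDs: " + inodesById.size()); // 3
  }
}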
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCreateEditsLog.java (new file)

Lines changed: 112 additions & 0 deletions

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import static org.junit.Assert.fail;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;

import java.io.File;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.Util;

/**
 * Tests the CreateEditsLog utility.
 */
public class TestCreateEditsLog {

  private static final File HDFS_DIR = new File(
      MiniDFSCluster.getBaseDirectory()).getAbsoluteFile();
  private static final File TEST_DIR = new File(
      System.getProperty("test.build.data", "build/test/data"),
      "TestCreateEditsLog").getAbsoluteFile();

  private MiniDFSCluster cluster;

  @Before
  public void setUp() throws Exception {
    deleteIfExists(HDFS_DIR);
    deleteIfExists(TEST_DIR);
  }

  @After
  public void tearDown() {
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
    deleteIfExists(HDFS_DIR);
    deleteIfExists(TEST_DIR);
  }

  /**
   * Tests that an edits log created using CreateEditsLog is valid and can be
   * loaded successfully by a namenode.
   */
  @Test(timeout=60000)
  public void testCanLoadCreatedEditsLog() throws Exception {
    // Format namenode.
    HdfsConfiguration conf = new HdfsConfiguration();
    File nameDir = new File(HDFS_DIR, "name");
    conf.set(DFS_NAMENODE_NAME_DIR_KEY, Util.fileAsURI(nameDir).toString());
    DFSTestUtil.formatNameNode(conf);

    // Call CreateEditsLog and move the resulting edits to the name dir.
    CreateEditsLog.main(new String[] { "-f", "1000", "0", "1", "-d",
        TEST_DIR.getAbsolutePath() });
    Path editsWildcard = new Path(TEST_DIR.getAbsolutePath(), "*");
    FileContext localFc = FileContext.getLocalFSFileContext();
    for (FileStatus edits: localFc.util().globStatus(editsWildcard)) {
      Path src = edits.getPath();
      Path dst = new Path(new File(nameDir, "current").getAbsolutePath(),
          src.getName());
      localFc.rename(src, dst);
    }

    // Start a namenode to try to load the edits.
    cluster = new MiniDFSCluster.Builder(conf)
        .format(false)
        .manageNameDfsDirs(false)
        .waitSafeMode(false)
        .build();
    cluster.waitClusterUp();

    // Test successful, because no exception thrown.
  }

  /**
   * Fully delete the given directory if it exists.
   *
   * @param file File to delete
   */
  private static void deleteIfExists(File file) {
    if (file.exists() && !FileUtil.fullyDelete(file)) {
      fail("Could not delete '" + file + "'");
    }
  }
}
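Within the Hadoop build this test would normally run under Maven Surefire (for example, mvn test -Dtest=TestCreateEditsLog in the hadoop-hdfs module). As a small illustrative alternative, assuming only JUnit 4 and the Hadoop test classes on the classpath, it can also be driven programmatically; the runner class name below is hypothetical.

import org.junit.runner.JUnitCore;
import org.junit.runner.Result;

import org.apache.hadoop.hdfs.server.namenode.TestCreateEditsLog;

// Minimal programmatic runner for the new test; purely illustrative.
public class RunTestCreateEditsLog {
  public static void main(String[] args) {
    Result result = JUnitCore.runClasses(TestCreateEditsLog.class);
    System.out.println("tests run: " + result.getRunCount()
        + ", failures: " + result.getFailureCount());
  }
}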
