Skip to content

Commit de89684

Browse files
committed
HDFS-6572. Add an option to the NameNode that prints the software and on-disk image versions. Contributed by Charles Lamb.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1605928 13f79535-47bb-0310-9956-ffa450edef68
1 parent 352cd76 commit de89684

File tree

5 files changed

+132
-13
lines changed

5 files changed

+132
-13
lines changed

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -479,6 +479,9 @@ Release 2.5.0 - UNRELEASED
479479
HDFS-6595. Allow the maximum threads for balancing on datanodes to be
480480
configurable. (Benoy Antony via szetszwo)
481481

482+
HDFS-6572. Add an option to the NameNode that prints the software and
483+
on-disk image versions. (Charles Lamb via cnauroth)
484+
482485
OPTIMIZATIONS
483486

484487
HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,8 @@ static public enum StartupOption{
9292
RECOVER ("-recover"),
9393
FORCE("-force"),
9494
NONINTERACTIVE("-nonInteractive"),
95-
RENAMERESERVED("-renameReserved");
95+
RENAMERESERVED("-renameReserved"),
96+
METADATAVERSION("-metadataVersion");
9697

9798
private static final Pattern ENUM_WITH_ROLLING_UPGRADE_OPTION = Pattern.compile(
9899
"(\\w+)\\((\\w+)\\)");

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -214,6 +214,13 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target,
214214

215215

216216
int layoutVersion = storage.getLayoutVersion();
217+
if (startOpt == StartupOption.METADATAVERSION) {
218+
System.out.println("HDFS Image Version: " + layoutVersion);
219+
System.out.println("Software format version: " +
220+
HdfsConstants.NAMENODE_LAYOUT_VERSION);
221+
return false;
222+
}
223+
217224
if (layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION) {
218225
NNStorage.checkVersionUpgradable(storage.getLayoutVersion());
219226
}
@@ -289,6 +296,12 @@ private boolean recoverStorageDirs(StartupOption startOpt,
289296
storage.dirIterator(); it.hasNext();) {
290297
StorageDirectory sd = it.next();
291298
StorageState curState;
299+
if (startOpt == StartupOption.METADATAVERSION) {
300+
/* All we need is the layout version. */
301+
storage.readProperties(sd);
302+
return true;
303+
}
304+
292305
try {
293306
curState = sd.analyzeStorage(startOpt, storage);
294307
// sd is locked but not opened

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

Lines changed: 37 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -201,25 +201,28 @@ public static enum OperationCategory {
201201
};
202202

203203
private static final String USAGE = "Usage: java NameNode ["
204-
+ StartupOption.BACKUP.getName() + "] | ["
205-
+ StartupOption.CHECKPOINT.getName() + "] | ["
204+
+ StartupOption.BACKUP.getName() + "] | \n\t["
205+
+ StartupOption.CHECKPOINT.getName() + "] | \n\t["
206206
+ StartupOption.FORMAT.getName() + " ["
207207
+ StartupOption.CLUSTERID.getName() + " cid ] ["
208208
+ StartupOption.FORCE.getName() + "] ["
209-
+ StartupOption.NONINTERACTIVE.getName() + "] ] | ["
209+
+ StartupOption.NONINTERACTIVE.getName() + "] ] | \n\t["
210210
+ StartupOption.UPGRADE.getName() +
211211
" [" + StartupOption.CLUSTERID.getName() + " cid]" +
212-
" [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | ["
213-
+ StartupOption.ROLLBACK.getName() + "] | ["
212+
" [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t["
213+
+ StartupOption.ROLLBACK.getName() + "] | \n\t["
214214
+ StartupOption.ROLLINGUPGRADE.getName() + " <"
215215
+ RollingUpgradeStartupOption.DOWNGRADE.name().toLowerCase() + "|"
216-
+ RollingUpgradeStartupOption.ROLLBACK.name().toLowerCase() + "> ] | ["
217-
+ StartupOption.FINALIZE.getName() + "] | ["
218-
+ StartupOption.IMPORT.getName() + "] | ["
219-
+ StartupOption.INITIALIZESHAREDEDITS.getName() + "] | ["
220-
+ StartupOption.BOOTSTRAPSTANDBY.getName() + "] | ["
221-
+ StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName()
222-
+ " ] ]";
216+
+ RollingUpgradeStartupOption.ROLLBACK.name().toLowerCase() + "> ] | \n\t["
217+
+ StartupOption.FINALIZE.getName() + "] | \n\t["
218+
+ StartupOption.IMPORT.getName() + "] | \n\t["
219+
+ StartupOption.INITIALIZESHAREDEDITS.getName() + "] | \n\t["
220+
+ StartupOption.BOOTSTRAPSTANDBY.getName() + "] | \n\t["
221+
+ StartupOption.RECOVER.getName() + " [ "
222+
+ StartupOption.FORCE.getName() + "] ] | \n\t["
223+
+ StartupOption.METADATAVERSION.getName() + " ] "
224+
+ " ]";
225+
223226

224227
public long getProtocolVersion(String protocol,
225228
long clientVersion) throws IOException {
@@ -1266,6 +1269,8 @@ static StartupOption parseArguments(String args[]) {
12661269
"can't understand option \"" + args[i] + "\"");
12671270
}
12681271
}
1272+
} else if (StartupOption.METADATAVERSION.getName().equalsIgnoreCase(cmd)) {
1273+
startOpt = StartupOption.METADATAVERSION;
12691274
} else {
12701275
return null;
12711276
}
@@ -1318,6 +1323,21 @@ private static void doRecovery(StartupOption startOpt, Configuration conf)
13181323
}
13191324
}
13201325

1326+
/**
1327+
* Verify that configured directories exist, then print the metadata versions
1328+
* of the software and the image.
1329+
*
1330+
* @param conf configuration to use
1331+
* @throws IOException
1332+
*/
1333+
private static boolean printMetadataVersion(Configuration conf)
1334+
throws IOException {
1335+
final FSImage fsImage = new FSImage(conf);
1336+
final FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
1337+
return fsImage.recoverTransitionRead(
1338+
StartupOption.METADATAVERSION, fs, null);
1339+
}
1340+
13211341
public static NameNode createNameNode(String argv[], Configuration conf)
13221342
throws IOException {
13231343
LOG.info("createNameNode " + Arrays.asList(argv));
@@ -1382,6 +1402,11 @@ public static NameNode createNameNode(String argv[], Configuration conf)
13821402
NameNode.doRecovery(startOpt, conf);
13831403
return null;
13841404
}
1405+
case METADATAVERSION: {
1406+
printMetadataVersion(conf);
1407+
terminate(0);
1408+
return null; // avoid javac warning
1409+
}
13851410
default: {
13861411
DefaultMetricsSystem.initialize("NameNode");
13871412
return new NameNode(conf);
Lines changed: 77 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;

/**
 * Verifies that starting the NameNode with the {@code -metadataVersion}
 * option prints both the on-disk image layout version and the software
 * layout version to stdout.
 */
public class TestMetadataVersionOutput {

  private MiniDFSCluster dfsCluster = null;
  private final Configuration conf = new Configuration();

  @Before
  public void setUp() throws Exception {
    // checkExitOnShutdown(false) makes NameNode.terminate() surface as an
    // ExitException instead of exiting the JVM, so the test can catch it.
    dfsCluster = new MiniDFSCluster.Builder(conf).
        numDataNodes(1).
        checkExitOnShutdown(false).
        build();
    dfsCluster.waitClusterUp();
  }

  @After
  public void tearDown() throws Exception {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    // NOTE(review): presumably allows cluster threads to wind down between
    // tests — confirm whether this fixed delay is still required.
    Thread.sleep(2000);
  }

  @Test(timeout = 30000)
  public void testMetadataVersionOutput() throws IOException {

    final PrintStream origOut = System.out;
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    final PrintStream stdOut = new PrintStream(baos);
    System.setOut(stdOut);
    try {
      try {
        // -metadataVersion prints the versions and then calls terminate(0),
        // which throws ExitException because exit-on-shutdown is disabled.
        NameNode.createNameNode(new String[] { "-metadataVersion" }, conf);
      } catch (Exception e) {
        assertExceptionContains("ExitException", e);
      }
      /* Check that the metadata versions are printed correctly. On a freshly
         formatted cluster the on-disk image version equals the software
         version, so both lines carry NAMENODE_LAYOUT_VERSION. */
      final String verNumStr = HdfsConstants.NAMENODE_LAYOUT_VERSION + "";
      final String output = baos.toString("UTF-8");
      assertTrue(output.contains("HDFS Image Version: " + verNumStr));
      assertTrue(output.contains("Software format version: " + verNumStr));
    } finally {
      // Always restore stdout, even if an assertion above fails; otherwise
      // the redirected stream leaks into every subsequent test.
      System.setOut(origOut);
    }
  }
}

0 commit comments

Comments
 (0)