Commit ae8bca2

Author: Suresh Srinivas (committed)
HDFS-2410. Further cleanup of hardcoded configuration keys and values. Contributed by Suresh Srinivas.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1204753 13f79535-47bb-0310-9956-ffa450edef68
Parent: 79c1049

File tree: 53 files changed, +252 −199 lines

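The change is mechanical but broad: each hardcoded configuration string is replaced with the matching named constant from DFSConfigKeys (HDFS keys) or CommonConfigurationKeys (core keys). A minimal sketch of the pattern, using the dfs.https.enable key from the DataNode.java diff below; the constant's value is inferred from the literal it replaces:

// Sketch of the cleanup pattern applied throughout this commit.
// DFS_HTTPS_ENABLE_KEY is the constant the DataNode.java diff below
// switches to; its value is inferred from the "dfs.https.enable"
// literal it replaces.
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;

import org.apache.hadoop.conf.Configuration;

class ConfigKeyCleanupSketch {
  static boolean httpsEnabled(Configuration conf) {
    // Before: conf.getBoolean("dfs.https.enable", false) -- a typo in the
    // string literal is silently ignored at runtime.
    // After: a misspelled constant fails to compile, and the key has one
    // authoritative definition.
    return conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false);
  }
}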

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 3 additions & 0 deletions
@@ -63,6 +63,9 @@ Trunk (unreleased changes)
 
     HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
 
+    HDFS-2410. Further cleanup of hardcoded configuration keys and values.
+    (suresh)
+
   OPTIMIZATIONS
     HDFS-2477. Optimize computing the diff between a block report and the
     namenode state. (Tomasz Nykiel via hairong)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

Lines changed: 5 additions & 3 deletions
@@ -37,6 +37,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
@@ -48,6 +49,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT;
 
@@ -460,11 +462,11 @@ conf, new AccessControlList(conf.get(DFS_ADMIN, " ")),
     if(LOG.isDebugEnabled()) {
       LOG.debug("Datanode listening on " + infoHost + ":" + tmpInfoPort);
     }
-    if (conf.getBoolean("dfs.https.enable", false)) {
+    if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
       boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
           DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
       InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
-          "dfs.datanode.https.address", infoHost + ":" + 0));
+          DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
       Configuration sslConf = new HdfsConfiguration(false);
       sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
           "ssl-server.xml"));
@@ -505,7 +507,7 @@ private void startPlugins(Configuration conf) {
 
   private void initIpcServer(Configuration conf) throws IOException {
     InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
-        conf.get("dfs.datanode.ipc.address"));
+        conf.get(DFS_DATANODE_IPC_ADDRESS_KEY));
 
     // Add all the RPC protocols that the Datanode implements
     ClientDatanodeProtocolServerSideTranslatorR23
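The DFSConfigKeys declarations behind the two new imports are not part of this diff; presumably they look like the sketch below, with values inferred from the string literals removed above. Note that the keystore resource key ("dfs.https.server.keystore.resource") is still a literal in DataNode's unchanged context lines, while the NameNodeHttpServer.java hunk below moves the same key behind DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY.

// Presumed shape of the constants in DFSConfigKeys (a sketch; values are
// inferred from the literals this commit removes, not shown in the diff).
public static final String DFS_HTTPS_ENABLE_KEY = "dfs.https.enable";
public static final String DFS_DATANODE_HTTPS_ADDRESS_KEY = "dfs.datanode.https.address";
public static final String DFS_DATANODE_IPC_ADDRESS_KEY = "dfs.datanode.ipc.address";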

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java

Lines changed: 8 additions & 4 deletions
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
@@ -145,7 +149,7 @@ private Map<String, String> getAuthFilterParams(Configuration conf)
       }
     };
 
-    boolean certSSL = conf.getBoolean("dfs.https.enable", false);
+    boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
     boolean useKrb = UserGroupInformation.isSecurityEnabled();
     if (certSSL || useKrb) {
       boolean needClientAuth = conf.getBoolean(
@@ -156,14 +160,14 @@ private Map<String, String> getAuthFilterParams(Configuration conf)
           DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
       Configuration sslConf = new HdfsConfiguration(false);
       if (certSSL) {
-        sslConf.addResource(conf.get(
-            "dfs.https.server.keystore.resource", "ssl-server.xml"));
+        sslConf.addResource(conf.get(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+            DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
       }
       httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth,
           useKrb);
       // assume same ssl port for all datanodes
       InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf
-          .get("dfs.datanode.https.address", infoHost + ":" + 50475));
+          .get(DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
       httpServer.setAttribute("datanode.https.port", datanodeSslPort
           .getPort());
     }
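Here the default value moves behind a constant as well. The declarations are again not shown in the diff; inferred from the replaced literals, they presumably read:

// Sketch of the presumed DFSConfigKeys declarations (inferred from the
// "dfs.https.server.keystore.resource" / "ssl-server.xml" literals above).
public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY =
    "dfs.https.server.keystore.resource";
public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";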

hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java

Lines changed: 4 additions & 2 deletions
@@ -29,6 +29,7 @@
 import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction;
 import org.apache.hadoop.fi.FiTestUtil;
 import org.apache.hadoop.fi.FiTestUtil.Action;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -56,8 +57,9 @@ public class TestFiDataTransferProtocol {
 
   static private FSDataOutputStream createFile(FileSystem fs, Path p
       ) throws IOException {
-    return fs.create(p, true, fs.getConf().getInt("io.file.buffer.size", 4096),
-        REPLICATION, BLOCKSIZE);
+    return fs.create(p, true,
+        fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,
+        4096), REPLICATION, BLOCKSIZE);
   }
 
   {
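The remaining test changes all have the same shape: the "io.file.buffer.size" literal becomes CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY while the inline default of 4096 is kept. Sketched standalone below; only the config-key usage is from the diff, the class and parameter names are placeholders:

import java.io.IOException;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class BufferSizeSketch {
  // Standalone version of the substituted call; the 4096 fallback matches
  // the default the tests keep inline.
  static FSDataOutputStream createFile(FileSystem fs, Path p, short repl,
      long blockSize) throws IOException {
    return fs.create(p, true,
        fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
        repl, blockSize);
  }
}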

hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java

Lines changed: 3 additions & 2 deletions
@@ -30,6 +30,7 @@
 import org.apache.hadoop.fi.DataTransferTestUtil.SleepAction;
 import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction;
 import org.apache.hadoop.fi.FiTestUtil;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -65,8 +66,8 @@ public class TestFiDataTransferProtocol2 {
 
   static private FSDataOutputStream createFile(FileSystem fs, Path p
       ) throws IOException {
-    return fs.create(p, true, fs.getConf().getInt("io.file.buffer.size", 4096),
-        REPLICATION, BLOCKSIZE);
+    return fs.create(p, true, fs.getConf()
+        .getInt(IO_FILE_BUFFER_SIZE_KEY, 4096), REPLICATION, BLOCKSIZE);
   }
 
   {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java

Lines changed: 1 addition & 1 deletion
@@ -163,7 +163,7 @@ public void testGeneralSBBehavior() throws IOException, InterruptedException {
     try {
       Configuration conf = new HdfsConfiguration();
       conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      conf.setBoolean("dfs.support.append", true);
+      conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
 
       FileSystem hdfs = cluster.getFileSystem();

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java

Lines changed: 2 additions & 2 deletions
@@ -143,8 +143,8 @@ static byte[] initBuffer(int size) {
   public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
       throws IOException {
     return fileSys.create(name, true,
-        fileSys.getConf().getInt("io.file.buffer.size", 4096),
-        (short) repl, (long) BLOCK_SIZE);
+        fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+        (short) repl, BLOCK_SIZE);
   }
 
   /**

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java

Lines changed: 2 additions & 1 deletion
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.net.NetUtils;
@@ -148,7 +149,7 @@ public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToR
         sock, targetAddr.toString()+ ":" + block.getBlockId(), block,
         testBlock.getBlockToken(),
         offset, lenToRead,
-        conf.getInt("io.file.buffer.size", 4096),
+        conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
         true, "");
   }
 

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java

Lines changed: 3 additions & 2 deletions
@@ -20,6 +20,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.junit.AfterClass;
@@ -66,7 +67,7 @@ public static void startUp () throws IOException {
   }
 
   @AfterClass
-  public static void tearDown() throws IOException {
+  public static void tearDown() {
     cluster.shutdown();
   }
 
@@ -91,7 +92,7 @@ public void testAppend() throws IOException {
         new Path("foo"+ oldFileLen +"_"+ flushedBytes1 +"_"+ flushedBytes2);
     LOG.info("Creating file " + p);
     FSDataOutputStream out = fs.create(p, false,
-        conf.getInt("io.file.buffer.size", 4096),
+        conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
         REPLICATION, BLOCK_SIZE);
     out.write(contents, 0, oldFileLen);
     out.close();
