
Commit 0efb56e

Author: Suresh Srinivas (committed)

HDFS-4610. Use common utils FileUtil#setReadable/Writable/Executable & FileUtil#canRead/Write/Execute. Contributed by Ivan Mitic.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1477385 13f79535-47bb-0310-9956-ffa450edef68

1 parent 4990c0f · commit 0efb56e
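
This commit mechanically swaps direct java.io.File permission calls for the static helpers in org.apache.hadoop.fs.FileUtil, whose signatures are visible in the hunks below (FileUtil.canRead/canWrite/canExecute(File) and FileUtil.setWritable/setExecutable(File, boolean)). A minimal before/after sketch of the pattern; the class name and directory path are hypothetical, for illustration only:

import java.io.File;

import org.apache.hadoop.fs.FileUtil;

public class FileUtilMigrationSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical local directory, used only to illustrate the API swap.
    File dir = new File("/tmp/example-storage-dir");

    // Before this commit: direct java.io.File permission calls.
    boolean writableOld = dir.canWrite();
    dir.setExecutable(true);

    // After this commit: the common FileUtil helpers used throughout the diff.
    boolean writableNew = FileUtil.canWrite(dir);
    FileUtil.setExecutable(dir, true);

    System.out.println("canWrite (File): " + writableOld
        + ", canWrite (FileUtil): " + writableNew);
  }
}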

13 files changed, +53 −43 lines changed

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 3 additions & 0 deletions
@@ -332,6 +332,9 @@ Trunk (Unreleased)
     HDFS-4734. HDFS Tests that use ShellCommandFencer are broken on Windows.
     (Arpit Agarwal via suresh)

+    HDFS-4610. Use common utils FileUtil#setReadable/Writable/Executable &
+    FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
+
     BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS

     HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java

Lines changed: 1 addition & 1 deletion
@@ -448,7 +448,7 @@ public StorageState analyzeStorage(StartupOption startOpt, Storage storage)
       LOG.warn(rootPath + "is not a directory");
       return StorageState.NON_EXISTENT;
     }
-    if (!root.canWrite()) {
+    if (!FileUtil.canWrite(root)) {
       LOG.warn("Cannot access storage directory " + rootPath);
       return StorageState.NON_EXISTENT;
     }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java

Lines changed: 2 additions & 1 deletion
@@ -33,6 +33,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -128,7 +129,7 @@ void inspectDirectory(StorageDirectory sd) throws IOException {
   static long readCheckpointTime(StorageDirectory sd) throws IOException {
     File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
     long timeStamp = 0L;
-    if (timeFile.exists() && timeFile.canRead()) {
+    if (timeFile.exists() && FileUtil.canRead(timeFile)) {
       DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
       try {
         timeStamp = in.readLong();

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java

Lines changed: 5 additions & 4 deletions
@@ -34,6 +34,7 @@

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -230,8 +231,8 @@ void attemptRestoreRemovedStorage() {
       File root = sd.getRoot();
       LOG.info("currently disabled dir " + root.getAbsolutePath() +
                "; type="+sd.getStorageDirType()
-               + ";canwrite="+root.canWrite());
-      if(root.exists() && root.canWrite()) {
+               + ";canwrite="+FileUtil.canWrite(root));
+      if(root.exists() && FileUtil.canWrite(root)) {
         LOG.info("restoring dir " + sd.getRoot().getAbsolutePath());
         this.addStorageDir(sd); // restore
         this.removedStorageDirs.remove(sd);
@@ -505,7 +506,7 @@ public File getFsImageName(long txid) {
         dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
       sd = it.next();
       File fsImage = getStorageFile(sd, NameNodeFile.IMAGE, txid);
-      if(sd.getRoot().canRead() && fsImage.exists())
+      if(FileUtil.canRead(sd.getRoot()) && fsImage.exists())
         return fsImage;
     }
     return null;
@@ -722,7 +723,7 @@ File findImageFile(long txid) {
   private File findFile(NameNodeDirType dirType, String name) {
     for (StorageDirectory sd : dirIterable(dirType)) {
       File candidate = new File(sd.getCurrentDir(), name);
-      if (sd.getCurrentDir().canRead() &&
+      if (FileUtil.canRead(sd.getCurrentDir()) &&
           candidate.exists()) {
         return candidate;
       }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java

Lines changed: 2 additions & 1 deletion
@@ -33,6 +33,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.Time;
@@ -102,7 +103,7 @@ static void downloadEditsToStorage(String fsName, RemoteEditLog log,
     assert !dstFiles.isEmpty() : "No checkpoint targets.";

     for (File f : dstFiles) {
-      if (f.exists() && f.canRead()) {
+      if (f.exists() && FileUtil.canRead(f)) {
         LOG.info("Skipping download of remote edit log " +
             log + " since it already is stored locally at " + f);
         return;

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

Lines changed: 3 additions & 3 deletions
@@ -681,9 +681,9 @@ private String createPermissionsDiagnosisString(File path) {
       sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
       sb.append("\tpermissions: ");
       sb.append(path.isDirectory() ? "d": "-");
-      sb.append(path.canRead() ? "r" : "-");
-      sb.append(path.canWrite() ? "w" : "-");
-      sb.append(path.canExecute() ? "x" : "-");
+      sb.append(FileUtil.canRead(path) ? "r" : "-");
+      sb.append(FileUtil.canWrite(path) ? "w" : "-");
+      sb.append(FileUtil.canExecute(path) ? "x" : "-");
       sb.append("\n");
       path = path.getParentFile();
     }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

Lines changed: 3 additions & 2 deletions
@@ -31,6 +31,7 @@

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -91,10 +92,10 @@ public void setUp() throws Exception {
   @After
   public void tearDown() throws Exception {
     if(data_fail != null) {
-      data_fail.setWritable(true);
+      FileUtil.setWritable(data_fail, true);
     }
     if(failedDir != null) {
-      failedDir.setWritable(true);
+      FileUtil.setWritable(failedDir, true);
     }
     if(cluster != null) {
       cluster.shutdown();

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java

Lines changed: 13 additions & 12 deletions
@@ -31,6 +31,7 @@
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -88,8 +89,8 @@ public void setUp() throws Exception {
   @After
   public void tearDown() throws Exception {
     for (int i = 0; i < 3; i++) {
-      new File(dataDir, "data"+(2*i+1)).setExecutable(true);
-      new File(dataDir, "data"+(2*i+2)).setExecutable(true);
+      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
+      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
     }
     cluster.shutdown();
   }
@@ -131,8 +132,8 @@ public void testSuccessiveVolumeFailures() throws Exception {
      * fail. The client does not retry failed nodes even though
      * perhaps they could succeed because just a single volume failed.
      */
-    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false));
-    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));

     /*
      * Create file1 and wait for 3 replicas (ie all DNs can still
@@ -168,7 +169,7 @@ public void testSuccessiveVolumeFailures() throws Exception {
      * Now fail a volume on the third datanode. We should be able to get
      * three replicas since we've already identified the other failures.
      */
-    assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, false));
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)3);
@@ -200,7 +201,7 @@ public void testSuccessiveVolumeFailures() throws Exception {
      * and that it's no longer up. Only wait for two replicas since
      * we'll never get a third.
      */
-    assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, false));
     Path file3 = new Path("/test3");
     DFSTestUtil.createFile(fs, file3, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file3, (short)2);
@@ -222,10 +223,10 @@ public void testSuccessiveVolumeFailures() throws Exception {
      * restart, so file creation should be able to succeed after
      * restoring the data directories and restarting the datanodes.
      */
-    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(true));
-    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true));
-    assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(true));
-    assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, true));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, true));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, true));
     cluster.restartDataNodes();
     cluster.waitActive();
     Path file4 = new Path("/test4");
@@ -261,8 +262,8 @@ public void testVolFailureStatsPreservedOnNNRestart() throws Exception {
     // third healthy so one node in the pipeline will not fail).
     File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false));
-    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));

     Path file1 = new Path("/test1");
     DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java

Lines changed: 4 additions & 4 deletions
@@ -77,8 +77,8 @@ public void setUp() throws Exception {
   @After
   public void tearDown() throws Exception {
     for (int i = 0; i < 3; i++) {
-      new File(dataDir, "data"+(2*i+1)).setExecutable(true);
-      new File(dataDir, "data"+(2*i+2)).setExecutable(true);
+      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
+      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
     }
     cluster.shutdown();
   }
@@ -152,7 +152,7 @@ public void testConfigureMinValidVolumes() throws Exception {

     // Fail a volume on the 2nd DN
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));

     // Should only get two replicas (the first DN and the 3rd)
     Path file1 = new Path("/test1");
@@ -165,7 +165,7 @@ public void testConfigureMinValidVolumes() throws Exception {

     // If we restore the volume we should still only be able to get
     // two replicas since the DN is still considered dead.
-    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)2);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java

Lines changed: 3 additions & 2 deletions
@@ -27,6 +27,7 @@

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -106,8 +107,8 @@ public void testShutdown() throws Exception {
       }
     } finally {
       // restore its old permission
-      dir1.setWritable(true);
-      dir2.setWritable(true);
+      FileUtil.setWritable(dir1, true);
+      FileUtil.setWritable(dir2, true);
     }
   }
