Skip to content

Commit ecbcb05

Browse files
committed
HDFS-14731. [FGL] Remove redundant locking on NameNode. Contributed by Konstantin V Shvachko.
1 parent ed70c11 commit ecbcb05

File tree

7 files changed

+46
-32
lines changed

7 files changed

+46
-32
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -218,7 +218,12 @@ private synchronized void applyEdits(long firstTxId, int numTxns, byte[] data)
218218
}
219219
lastAppliedTxId = logLoader.getLastAppliedTxId();
220220

221-
getNamesystem().dir.updateCountForQuota();
221+
getNamesystem().writeLock();
222+
try {
223+
getNamesystem().dir.updateCountForQuota();
224+
} finally {
225+
getNamesystem().writeUnlock();
226+
}
222227
} finally {
223228
backupInputStream.clear();
224229
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -187,11 +187,11 @@ public void pauseForTestingAfterNthCheckpoint(final String zone,
187187
final int count) throws IOException {
188188
INodesInPath iip;
189189
final FSPermissionChecker pc = dir.getPermissionChecker();
190-
dir.readLock();
190+
dir.getFSNamesystem().readLock();
191191
try {
192192
iip = dir.resolvePath(pc, zone, DirOp.READ);
193193
} finally {
194-
dir.readUnlock();
194+
dir.getFSNamesystem().readUnlock();
195195
}
196196
reencryptionHandler
197197
.pauseForTestingAfterNthCheckpoint(iip.getLastINode().getId(), count);
@@ -280,11 +280,11 @@ void stopReencryptThread() {
280280
if (getProvider() == null || reencryptionHandler == null) {
281281
return;
282282
}
283-
dir.writeLock();
283+
dir.getFSNamesystem().writeLock();
284284
try {
285285
reencryptionHandler.stopThreads();
286286
} finally {
287-
dir.writeUnlock();
287+
dir.getFSNamesystem().writeUnlock();
288288
}
289289
if (reencryptHandlerExecutor != null) {
290290
reencryptHandlerExecutor.shutdownNow();

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -382,7 +382,6 @@ private static ZoneEncryptionInfoProto getZoneEncryptionInfoProto(
382382
static void saveFileXAttrsForBatch(FSDirectory fsd,
383383
List<FileEdekInfo> batch) {
384384
assert fsd.getFSNamesystem().hasWriteLock();
385-
assert !fsd.hasWriteLock();
386385
if (batch != null && !batch.isEmpty()) {
387386
for (FileEdekInfo entry : batch) {
388387
final INode inode = fsd.getInode(entry.getInodeId());
@@ -727,13 +726,13 @@ static String getKeyNameForZone(final FSDirectory dir,
727726
final FSPermissionChecker pc, final String zone) throws IOException {
728727
assert dir.getProvider() != null;
729728
final INodesInPath iip;
730-
dir.readLock();
729+
dir.getFSNamesystem().readLock();
731730
try {
732731
iip = dir.resolvePath(pc, zone, DirOp.READ);
733732
dir.ezManager.checkEncryptionZoneRoot(iip.getLastINode(), zone);
734733
return dir.ezManager.getKeyName(iip);
735734
} finally {
736-
dir.readUnlock();
735+
dir.getFSNamesystem().readUnlock();
737736
}
738737
}
739738
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

Lines changed: 17 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,6 @@
8282
import java.util.TreeSet;
8383
import java.util.concurrent.ForkJoinPool;
8484
import java.util.concurrent.RecursiveAction;
85-
import java.util.concurrent.locks.ReentrantReadWriteLock;
8685

8786
import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES;
8887
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT;
@@ -172,9 +171,6 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) {
172171
// Each entry in this set must be a normalized path.
173172
private volatile SortedSet<String> protectedDirectories;
174173

175-
// lock to protect the directory and BlockMap
176-
private final ReentrantReadWriteLock dirLock;
177-
178174
private final boolean isPermissionEnabled;
179175
private final boolean isPermissionContentSummarySubAccess;
180176
/**
@@ -215,37 +211,44 @@ public void setINodeAttributeProvider(INodeAttributeProvider provider) {
215211
attributeProvider = provider;
216212
}
217213

218-
// utility methods to acquire and release read lock and write lock
214+
/**
215+
* The directory lock dirLock provided redundant locking.
216+
* It has been used whenever namesystem.fsLock was used.
217+
* dirLock is now removed and utility methods to acquire and release dirLock
218+
* remain as placeholders only
219+
*/
219220
void readLock() {
220-
this.dirLock.readLock().lock();
221+
assert namesystem.hasReadLock() : "Should hold namesystem read lock";
221222
}
222223

223224
void readUnlock() {
224-
this.dirLock.readLock().unlock();
225+
assert namesystem.hasReadLock() : "Should hold namesystem read lock";
225226
}
226227

227228
void writeLock() {
228-
this.dirLock.writeLock().lock();
229+
assert namesystem.hasWriteLock() : "Should hold namesystem write lock";
229230
}
230231

231232
void writeUnlock() {
232-
this.dirLock.writeLock().unlock();
233+
assert namesystem.hasWriteLock() : "Should hold namesystem write lock";
233234
}
234235

235236
boolean hasWriteLock() {
236-
return this.dirLock.isWriteLockedByCurrentThread();
237+
return namesystem.hasWriteLock();
237238
}
238239

239240
boolean hasReadLock() {
240-
return this.dirLock.getReadHoldCount() > 0 || hasWriteLock();
241+
return namesystem.hasReadLock();
241242
}
242243

244+
@Deprecated // dirLock is obsolete, use namesystem.fsLock instead
243245
public int getReadHoldCount() {
244-
return this.dirLock.getReadHoldCount();
246+
return namesystem.getReadHoldCount();
245247
}
246248

249+
@Deprecated // dirLock is obsolete, use namesystem.fsLock instead
247250
public int getWriteHoldCount() {
248-
return this.dirLock.getWriteHoldCount();
251+
return namesystem.getWriteHoldCount();
249252
}
250253

251254
public int getListLimit() {
@@ -273,7 +276,6 @@ public enum DirOp {
273276
};
274277

275278
FSDirectory(FSNamesystem ns, Configuration conf) throws IOException {
276-
this.dirLock = new ReentrantReadWriteLock(true); // fair
277279
this.inodeId = new INodeId();
278280
rootDir = createRoot(ns);
279281
inodeMap = INodeMap.newInstance(rootDir);
@@ -1492,12 +1494,7 @@ public final void removeFromInodeMap(List<? extends INode> inodes) {
14921494
* @return The inode associated with the given id
14931495
*/
14941496
public INode getInode(long id) {
1495-
readLock();
1496-
try {
1497-
return inodeMap.get(id);
1498-
} finally {
1499-
readUnlock();
1500-
}
1497+
return inodeMap.get(id);
15011498
}
15021499

15031500
@VisibleForTesting

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3723,6 +3723,7 @@ INodeFile getBlockCollection(BlockInfo b) {
37233723

37243724
@Override
37253725
public INodeFile getBlockCollection(long id) {
3726+
assert hasReadLock() : "Accessing INode id = " + id + " without read lock";
37263727
INode inode = getFSDirectory().getInode(id);
37273728
return inode == null ? null : inode.asFile();
37283729
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -338,7 +338,7 @@ public void run() {
338338
}
339339

340340
final Long zoneId;
341-
dir.readLock();
341+
dir.getFSNamesystem().readLock();
342342
try {
343343
zoneId = getReencryptionStatus().getNextUnprocessedZone();
344344
if (zoneId == null) {
@@ -350,7 +350,7 @@ public void run() {
350350
getReencryptionStatus().markZoneStarted(zoneId);
351351
resetSubmissionTracker(zoneId);
352352
} finally {
353-
dir.readUnlock();
353+
dir.getFSNamesystem().readUnlock();
354354
}
355355

356356
try {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -570,7 +570,8 @@ public void testMultipleReplicasScheduledForUpgradeDomain() throws Exception {
570570
BlockInfo storedBlock = bm.getStoredBlock(b.getLocalBlock());
571571

572572
// The block should be replicated OK - so Reconstruction Work will be null
573-
BlockReconstructionWork work = bm.scheduleReconstruction(storedBlock, 2);
573+
BlockReconstructionWork work = scheduleReconstruction(
574+
cluster.getNamesystem(), storedBlock, 2);
574575
assertNull(work);
575576
// Set the upgradeDomain to "3" for the 3 nodes hosting the block.
576577
// Then alternately set the remaining 3 nodes to have an upgradeDomain
@@ -586,7 +587,8 @@ public void testMultipleReplicasScheduledForUpgradeDomain() throws Exception {
586587
}
587588
}
588589
// Now reconWork is non-null and 2 extra targets are needed
589-
work = bm.scheduleReconstruction(storedBlock, 2);
590+
work = scheduleReconstruction(
591+
cluster.getNamesystem(), storedBlock, 2);
590592
assertEquals(2, work.getAdditionalReplRequired());
591593

592594
// Add the block to the replication queue and ensure it is replicated
@@ -598,6 +600,16 @@ public void testMultipleReplicasScheduledForUpgradeDomain() throws Exception {
598600
}
599601
}
600602

603+
static BlockReconstructionWork scheduleReconstruction(
604+
FSNamesystem fsn, BlockInfo block, int priority) {
605+
fsn.writeLock();
606+
try {
607+
return fsn.getBlockManager().scheduleReconstruction(block, priority);
608+
} finally {
609+
fsn.writeUnlock();
610+
}
611+
}
612+
601613
@Test
602614
public void testUnderReplicatedRespectsRacksAndUpgradeDomain()
603615
throws Exception {

0 commit comments

Comments (0)