
Commit 6ee5440

HDFS-5323. Remove some deadcode in BlockManager (Colin Patrick McCabe)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1530814 13f79535-47bb-0310-9956-ffa450edef68
1 parent daae25a commit 6ee5440


4 files changed: +8 -14 lines changed


hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 2 additions & 0 deletions
@@ -306,6 +306,8 @@ Release 2.3.0 - UNRELEASED
     HDFS-4510. Cover classes ClusterJspHelper/NamenodeJspHelper with unit
     tests. (Andrey Klochkov via kihwal)

+    HDFS-5323. Remove some deadcode in BlockManager (Colin Patrick McCabe)
+
   OPTIMIZATIONS

     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

Lines changed: 3 additions & 4 deletions
@@ -75,6 +75,7 @@
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.LightWeightGSet;
 import org.apache.hadoop.util.Time;

 import com.google.common.annotations.VisibleForTesting;
@@ -90,9 +91,6 @@ public class BlockManager {
   static final Log LOG = LogFactory.getLog(BlockManager.class);
   public static final Log blockLog = NameNode.blockStateChangeLog;

-  /** Default load factor of map */
-  public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f;
-
   private static final String QUEUE_REASON_CORRUPT_STATE =
       "it has the wrong state or generation stamp";

@@ -244,7 +242,8 @@ public BlockManager(final Namesystem namesystem, final FSClusterStats stats,
     invalidateBlocks = new InvalidateBlocks(datanodeManager);

     // Compute the map capacity by allocating 2% of total memory
-    blocksMap = new BlocksMap(DEFAULT_MAP_LOAD_FACTOR);
+    blocksMap = new BlocksMap(
+        LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
     blockplacement = BlockPlacementPolicy.getInstance(
       conf, stats, datanodeManager.getNetworkTopology());
     pendingReplications = new PendingReplicationBlocks(conf.getInt(
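The new call sizes the block map from a fixed share of the JVM heap rather than from a load factor. Below is a hypothetical, standalone sketch of that style of sizing, in the spirit of LightWeightGSet.computeCapacity(2.0, "BlocksMap"); the real Hadoop method differs in detail (32/64-bit reference detection, bounds checks, logging), and CapacitySketch and its names are illustrative only.

public final class CapacitySketch {

  static int computeCapacity(double percentage, String mapName) {
    // Dedicate the given percentage of the maximum heap to the map.
    long maxMemory = Runtime.getRuntime().maxMemory();
    double bytesForMap = maxMemory * (percentage / 100.0);

    // Assume 8 bytes per bucket reference (64-bit JVM); this is an
    // assumption, the real code inspects the JVM data model.
    final int referenceSize = 8;
    long entries = (long) (bytesForMap / referenceSize);

    // Round down to a power of two, capping the exponent at 30 so the
    // result stays a positive int.
    int exponent = 0;
    while (exponent < 30 && (1L << (exponent + 1)) <= entries) {
      exponent++;
    }
    int capacity = 1 << exponent;
    System.out.println(mapName + " capacity = 2^" + exponent + " = " + capacity);
    return capacity;
  }

  public static void main(String[] args) {
    computeCapacity(2.0, "BlocksMap");
  }
}

For a rough sense of scale: with a 1 GiB heap and 8-byte references, 2% is about 21.5 MB, or roughly 2.68 million slots, which rounds down to 2^21 = 2,097,152.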

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java

Lines changed: 3 additions & 3 deletions
@@ -57,11 +57,11 @@ public void remove() {
   /** Constant {@link LightWeightGSet} capacity. */
   private final int capacity;

-  private volatile GSet<Block, BlockInfo> blocks;
+  private GSet<Block, BlockInfo> blocks;

-  BlocksMap(final float loadFactor) {
+  BlocksMap(int capacity) {
     // Use 2% of total memory to size the GSet capacity
-    this.capacity = LightWeightGSet.computeCapacity(2.0, "BlocksMap");
+    this.capacity = capacity;
     this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity);
   }
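The load-factor parameter could be dropped because a LightWeightGSet-style table is allocated once and never rehashes, so only its capacity matters. The following is a minimal, hypothetical fixed-capacity chained set illustrating that shape (FixedCapacitySet is not a Hadoop class; the power-of-two length and bit-mask indexing mirror what such sets typically do):

import java.util.LinkedList;

class FixedCapacitySet<E> {
  private final LinkedList<E>[] buckets;  // allocated once, never resized
  private final int mask;

  @SuppressWarnings("unchecked")
  FixedCapacitySet(int capacity) {
    if (capacity <= 0 || Integer.bitCount(capacity) != 1) {
      throw new IllegalArgumentException("capacity must be a power of two");
    }
    this.buckets = new LinkedList[capacity];
    this.mask = capacity - 1;
  }

  boolean add(E element) {
    int i = element.hashCode() & mask;    // mask replaces % because length is 2^n
    if (buckets[i] == null) {
      buckets[i] = new LinkedList<E>();
    }
    if (buckets[i].contains(element)) {
      return false;
    }
    return buckets[i].add(element);
  }

  boolean contains(E element) {
    int i = element.hashCode() & mask;
    return buckets[i] != null && buckets[i].contains(element);
  }
}

Because nothing ever triggers a resize, a threshold of the form capacity * loadFactor never comes into play, which is why DEFAULT_MAP_LOAD_FACTOR was dead code.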

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

Lines changed: 0 additions & 7 deletions
@@ -192,17 +192,10 @@ public void testFileAdd() throws Exception {
     assertCounter("CreateFileOps", 1L, rb);
     assertCounter("FilesCreated", (long)file.depth(), rb);

-    // Blocks are stored in a hashmap. Compute its capacity, which
-    // doubles every time the number of entries reach the threshold.
-    int threshold = (int)(blockCapacity * BlockManager.DEFAULT_MAP_LOAD_FACTOR);
-    while (threshold < blockCount) {
-      blockCapacity <<= 1;
-    }
     long filesTotal = file.depth() + 1; // Add 1 for root
     rb = getMetrics(NS_METRICS);
     assertGauge("FilesTotal", filesTotal, rb);
     assertGauge("BlocksTotal", blockCount, rb);
-    assertGauge("BlockCapacity", blockCapacity, rb);
     fs.delete(file, true);
     filesTotal--; // reduce the filecount for deleted file
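The deleted lines were predicting the BlockCapacity gauge by mimicking java.util.HashMap-style growth, where the table doubles whenever the entry count exceeds capacity * loadFactor. A hedged sketch of that rule, with the threshold recomputed from the doubled capacity on each pass (ExpectedCapacity is illustrative and not part of TestNameNodeMetrics):

class ExpectedCapacity {
  static int expectedCapacity(int initialCapacity, long entryCount, float loadFactor) {
    int capacity = initialCapacity;
    // Recheck the threshold against the current capacity on every pass.
    while (entryCount > (long) (capacity * loadFactor)) {
      capacity <<= 1;
    }
    return capacity;
  }

  public static void main(String[] args) {
    // Starting from a capacity of 16 with the old 0.75 factor, 100 entries
    // push the capacity to 256 (16 -> 32 -> 64 -> 128 -> 256).
    System.out.println(expectedCapacity(16, 100, 0.75f));
  }
}

Since the LightWeightGSet-backed BlocksMap is sized once at construction and does not resize, there is no doubling left to predict, which is presumably why the BlockCapacity assertion was removed along with the dead constant.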
