Commit 5f897c0

HDFS-5556. Add some more NameNode cache statistics, cache pool stats (cmccabe)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1546143 13f79535-47bb-0310-9956-ffa450edef68
1 parent a1b0ae3 commit 5f897c0

32 files changed, +600 -225 lines

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 3 additions & 0 deletions
@@ -218,6 +218,9 @@ Trunk (Unreleased)
     HDFS-5286. Flatten INodeDirectory hierarchy: Replace INodeDirectoryWithQuota
     with DirectoryWithQuotaFeature. (szetszwo)
 
+    HDFS-5556. Add some more NameNode cache statistics, cache pool stats
+    (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)

hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml

Lines changed: 5 additions & 0 deletions
@@ -352,6 +352,11 @@
       <Method name="getReplication" />
       <Bug pattern="ICAST_QUESTIONABLE_UNSIGNED_RIGHT_SHIFT" />
     </Match>
+    <Match>
+      <Class name="org.apache.hadoop.hdfs.protocol.CacheDirective" />
+      <Method name="insertInternal" />
+      <Bug pattern="BC_UNCONFIRMED_CAST" />
+    </Match>
     <!-- These two are used for shutting down and kicking the CRMon, do not need strong sync -->
     <Match>
       <Class name="org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor" />

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

Lines changed: 2 additions & 1 deletion
@@ -109,6 +109,7 @@
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -2358,7 +2359,7 @@ public void removeCachePool(String poolName) throws IOException {
     }
   }
 
-  public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
     checkOpen();
     try {
       return namenode.listCachePools("");
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

Lines changed: 3 additions & 2 deletions
@@ -58,6 +58,7 @@
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -1713,12 +1714,12 @@ public void removeCachePool(String poolName) throws IOException {
   /**
    * List all cache pools.
    *
-   * @return A remote iterator from which you can get CachePoolInfo objects.
+   * @return A remote iterator from which you can get CachePoolEntry objects.
    *         Requests will be made as needed.
    * @throws IOException
    *         If there was an error listing cache pools.
    */
-  public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
     return dfs.listCachePools();
   }
 }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java

Lines changed: 3 additions & 2 deletions
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
@@ -213,12 +214,12 @@ public void removeCachePool(String poolName) throws IOException {
   /**
    * List all cache pools.
    *
-   * @return A remote iterator from which you can get CachePoolInfo objects.
+   * @return A remote iterator from which you can get CachePoolEntry objects.
    *         Requests will be made as needed.
   * @throws IOException
    *         If there was an error listing cache pools.
    */
-  public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
     return dfs.listCachePools();
   }
 }
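
Note: a minimal usage sketch of the new listCachePools() return type. Illustrative only; the NameNode URI is a placeholder, and the CachePoolStats getters are assumed to mirror the CacheDirectiveStats getters changed elsewhere in this commit.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;

public class ListCachePoolsExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder NameNode URI; point this at a real cluster.
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://localhost:8020"), conf);
    // listCachePools() now yields CachePoolEntry (info + stats), not CachePoolInfo.
    RemoteIterator<CachePoolEntry> it = admin.listCachePools();
    while (it.hasNext()) {
      CachePoolEntry entry = it.next();
      System.out.println(entry.getInfo().getPoolName()
          + ": bytesNeeded=" + entry.getStats().getBytesNeeded()
          + ", bytesCached=" + entry.getStats().getBytesCached());
    }
  }
}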

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java

Lines changed: 70 additions & 17 deletions
@@ -21,6 +21,8 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.namenode.CachePool;
+import org.apache.hadoop.util.IntrusiveCollection;
+import org.apache.hadoop.util.IntrusiveCollection.Element;
 
 import com.google.common.base.Preconditions;
 
@@ -30,32 +32,32 @@
  * This is an implementation class, not part of the public API.
  */
 @InterfaceAudience.Private
-public final class CacheDirective {
-  private final long entryId;
+public final class CacheDirective implements IntrusiveCollection.Element {
+  private final long id;
   private final String path;
   private final short replication;
-  private final CachePool pool;
+  private CachePool pool;
   private long bytesNeeded;
   private long bytesCached;
   private long filesAffected;
+  private Element prev;
+  private Element next;
 
-  public CacheDirective(long entryId, String path,
-      short replication, CachePool pool) {
-    Preconditions.checkArgument(entryId > 0);
-    this.entryId = entryId;
+  public CacheDirective(long id, String path,
+      short replication) {
+    Preconditions.checkArgument(id > 0);
+    this.id = id;
     Preconditions.checkArgument(replication > 0);
     this.path = path;
-    Preconditions.checkNotNull(pool);
     this.replication = replication;
     Preconditions.checkNotNull(path);
-    this.pool = pool;
     this.bytesNeeded = 0;
     this.bytesCached = 0;
     this.filesAffected = 0;
   }
 
-  public long getEntryId() {
-    return entryId;
+  public long getId() {
+    return id;
   }
 
   public String getPath() {
@@ -70,9 +72,9 @@ public short getReplication() {
     return replication;
   }
 
-  public CacheDirectiveInfo toDirective() {
+  public CacheDirectiveInfo toInfo() {
     return new CacheDirectiveInfo.Builder().
-        setId(entryId).
+        setId(id).
         setPath(new Path(path)).
         setReplication(replication).
         setPool(pool.getPoolName()).
@@ -88,13 +90,13 @@ public CacheDirectiveStats toStats() {
   }
 
   public CacheDirectiveEntry toEntry() {
-    return new CacheDirectiveEntry(toDirective(), toStats());
+    return new CacheDirectiveEntry(toInfo(), toStats());
   }
 
   @Override
   public String toString() {
     StringBuilder builder = new StringBuilder();
-    builder.append("{ entryId:").append(entryId).
+    builder.append("{ id:").append(id).
       append(", path:").append(path).
       append(", replication:").append(replication).
       append(", pool:").append(pool).
@@ -113,12 +115,12 @@ public boolean equals(Object o) {
       return false;
     }
     CacheDirective other = (CacheDirective)o;
-    return entryId == other.entryId;
+    return id == other.id;
   }
 
   @Override
   public int hashCode() {
-    return new HashCodeBuilder().append(entryId).toHashCode();
+    return new HashCodeBuilder().append(id).toHashCode();
   }
 
   public long getBytesNeeded() {
@@ -156,4 +158,55 @@ public void clearFilesAffected() {
   public void incrementFilesAffected() {
     this.filesAffected++;
   }
+
+  @SuppressWarnings("unchecked")
+  @Override // IntrusiveCollection.Element
+  public void insertInternal(IntrusiveCollection<? extends Element> list,
+      Element prev, Element next) {
+    assert this.pool == null;
+    this.pool = ((CachePool.DirectiveList)list).getCachePool();
+    this.prev = prev;
+    this.next = next;
+  }
+
+  @Override // IntrusiveCollection.Element
+  public void setPrev(IntrusiveCollection<? extends Element> list, Element prev) {
+    assert list == pool.getDirectiveList();
+    this.prev = prev;
+  }
+
+  @Override // IntrusiveCollection.Element
+  public void setNext(IntrusiveCollection<? extends Element> list, Element next) {
+    assert list == pool.getDirectiveList();
+    this.next = next;
+  }
+
+  @Override // IntrusiveCollection.Element
+  public void removeInternal(IntrusiveCollection<? extends Element> list) {
+    assert list == pool.getDirectiveList();
+    this.pool = null;
+    this.prev = null;
+    this.next = null;
+  }
+
+  @Override // IntrusiveCollection.Element
+  public Element getPrev(IntrusiveCollection<? extends Element> list) {
+    if (list != pool.getDirectiveList()) {
+      return null;
+    }
+    return this.prev;
+  }
+
+  @Override // IntrusiveCollection.Element
+  public Element getNext(IntrusiveCollection<? extends Element> list) {
+    if (list != pool.getDirectiveList()) {
+      return null;
+    }
+    return this.next;
+  }
+
+  @Override // IntrusiveCollection.Element
+  public boolean isInList(IntrusiveCollection<? extends Element> list) {
+    return pool == null ? false : list == pool.getDirectiveList();
+  }
 };
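
Note: the Element methods above are the classic intrusive-list pattern: the directive itself stores its prev/next links and recovers its owning CachePool from the list it was inserted into, so membership tests and removal need no auxiliary wrapper nodes. A minimal self-contained sketch of the idea (illustrative only, not the Hadoop IntrusiveCollection API):

// Each element carries its own links, mirroring how CacheDirective
// tracks prev/next and its owning pool.
class Node {
  Node prev, next; // links live inside the element itself
  final long id;
  Node(long id) { this.id = id; }
}

class SimpleIntrusiveList {
  private Node head;

  void addFirst(Node n) {
    assert n.prev == null && n.next == null; // like insertInternal's assert
    n.next = head;
    if (head != null) {
      head.prev = n;
    }
    head = n;
  }

  void remove(Node n) {
    if (n.prev != null) { n.prev.next = n.next; } else { head = n.next; }
    if (n.next != null) { n.next.prev = n.prev; }
    n.prev = null; // clearing the links marks the node as "not in a list",
    n.next = null; // just as removeInternal() nulls pool/prev/next above
  }
}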

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java

Lines changed: 3 additions & 3 deletions
@@ -94,21 +94,21 @@ private CacheDirectiveStats(long bytesNeeded, long bytesCached,
   /**
    * @return The bytes needed.
    */
-  public Long getBytesNeeded() {
+  public long getBytesNeeded() {
     return bytesNeeded;
   }
 
   /**
    * @return The bytes cached.
    */
-  public Long getBytesCached() {
+  public long getBytesCached() {
     return bytesCached;
   }
 
   /**
    * @return The files affected.
    */
-  public Long getFilesAffected() {
+  public long getFilesAffected() {
     return filesAffected;
   }
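
Note: switching these getters from Long to long removes per-call autoboxing and rules out a NullPointerException from unboxing in caller arithmetic. A hedged sketch; the Builder setter names are assumed to follow the field names:

// Hypothetical caller: with primitive getters this subtraction cannot
// throw from unboxing and allocates no Long wrappers.
CacheDirectiveStats stats = new CacheDirectiveStats.Builder().
    setBytesNeeded(1024).
    setBytesCached(512).
    setFilesAffected(2).
    build();
long remaining = stats.getBytesNeeded() - stats.getBytesCached();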

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java

Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Describes a Cache Pool entry.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class CachePoolEntry {
+  private final CachePoolInfo info;
+  private final CachePoolStats stats;
+
+  public CachePoolEntry(CachePoolInfo info, CachePoolStats stats) {
+    this.info = info;
+    this.stats = stats;
+  }
+
+  public CachePoolInfo getInfo() {
+    return info;
+  }
+
+  public CachePoolStats getStats() {
+    return stats;
+  }
+}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java

Lines changed: 5 additions & 1 deletion
@@ -30,6 +30,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
@@ -150,7 +151,10 @@ public int hashCode() {
 
   public static void validate(CachePoolInfo info) throws IOException {
     if (info == null) {
-      throw new IOException("CachePoolInfo is null");
+      throw new InvalidRequestException("CachePoolInfo is null");
+    }
+    if ((info.getWeight() != null) && (info.getWeight() < 0)) {
+      throw new InvalidRequestException("CachePool weight is negative.");
     }
     validateName(info.poolName);
   }
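
Note: a small sketch of what the tightened validate() now rejects. Illustrative only; the CachePoolInfo(String) constructor and setWeight setter are assumed from this era's API (getWeight() appears in the hunk above).

import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class ValidatePoolExample {
  public static void main(String[] args) throws Exception {
    CachePoolInfo info = new CachePoolInfo("pool1");
    info.setWeight(-1); // a negative weight now fails validation
    try {
      CachePoolInfo.validate(info);
    } catch (InvalidRequestException e) {
      // Both the null-info and negative-weight cases now raise this
      // subtype instead of a bare IOException.
      System.out.println("rejected: " + e.getMessage());
    }
  }
}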
