Commit a380703

umbrant authored and Gergo Repas committed

HDFS-13702. Remove HTrace hooks from DFSClient to reduce CPU usage. Contributed by Todd Lipcon.

(cherry picked from commit 5d748bd)
Change-Id: If054510f4b32203bba632ec1456110c8ef82bc8a
1 parent: 227b6aa
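
Why the removal saves CPU: as the DFSClient.java hunk below shows, the deleted newReaderTraceScope created a TraceScope and stringified its "pos" and "reqLen" annotations on every read call, whether or not any trace was actually being collected. The stand-alone sketch below, with hypothetical names of our own (TraceOverheadSketch, fakeReaderScope), imitates that per-read allocation pattern to make the cost visible; it is an illustration, not code from the patch.

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: mimics what the removed DFSClient#newReaderTraceScope
// did on each read -- allocate a scope-like object and build its annotation
// strings eagerly -- so the hot-path overhead can be observed in isolation.
public class TraceOverheadSketch {

  static Map<String, String> fakeReaderScope(String path, long pos, int reqLen) {
    Map<String, String> kv = new HashMap<>();
    kv.put("path", path);                        // path annotation
    kv.put("pos", Long.toString(pos));           // string built unconditionally
    kv.put("reqLen", Integer.toString(reqLen));  // string built unconditionally
    return kv;
  }

  public static void main(String[] args) {
    long sink = 0;
    long t0 = System.nanoTime();
    for (int i = 0; i < 10_000_000; i++) {
      sink += fakeReaderScope("/some/file", i, 4096).size(); // per-read cost
    }
    long elapsedMs = (System.nanoTime() - t0) / 1_000_000;
    System.out.println("10M fake scopes: " + elapsedMs + " ms (sink=" + sink + ")");
  }
}

Dropping the wrapper removes the scope object and both string conversions from every DFSInputStream read.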

12 files changed: 68 additions & 178 deletions (diffs for 4 of the 12 files appear below)

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
Lines changed: 0 additions & 19 deletions

@@ -2977,25 +2977,6 @@ TraceScope newSrcDstTraceScope(String description, String src, String dst) {
     return scope;
   }
 
-  /**
-   * Full detailed tracing for read requests: path, position in the file,
-   * and length.
-   *
-   * @param reqLen requested length
-   */
-  TraceScope newReaderTraceScope(String description, String path, long pos,
-      int reqLen) {
-    TraceScope scope = newPathTraceScope(description, path);
-    scope.addKVAnnotation("pos", Long.toString(pos));
-    scope.addKVAnnotation("reqLen", Integer.toString(reqLen));
-    return scope;
-  }
-
-  /** Add the returned length info to the scope. */
-  void addRetLenToReaderScope(TraceScope scope, int retLen) {
-    scope.addKVAnnotation("retLen", Integer.toString(retLen));
-  }
-
   /**
    * Get the erasure coding policy information for the specified path
    *

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
Lines changed: 6 additions & 38 deletions

@@ -84,8 +84,6 @@
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.htrace.core.SpanId;
-import org.apache.htrace.core.TraceScope;
-import org.apache.htrace.core.Tracer;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -640,7 +638,6 @@ protected BlockReader getBlockReader(LocatedBlock targetBlock,
         setClientCacheContext(dfsClient.getClientContext()).
         setUserGroupInformation(dfsClient.ugi).
         setConfiguration(dfsClient.getConfiguration()).
-        setTracer(dfsClient.getTracer()).
         build();
   }
 
@@ -820,31 +817,14 @@ public synchronized int read(@Nonnull final byte buf[], int off, int len)
     }
     ReaderStrategy byteArrayReader =
         new ByteArrayStrategy(buf, off, len, readStatistics, dfsClient);
-    try (TraceScope scope =
-        dfsClient.newReaderTraceScope("DFSInputStream#byteArrayRead",
-            src, getPos(), len)) {
-      int retLen = readWithStrategy(byteArrayReader);
-      if (retLen < len) {
-        dfsClient.addRetLenToReaderScope(scope, retLen);
-      }
-      return retLen;
-    }
+    return readWithStrategy(byteArrayReader);
   }
 
   @Override
   public synchronized int read(final ByteBuffer buf) throws IOException {
     ReaderStrategy byteBufferReader =
         new ByteBufferStrategy(buf, readStatistics, dfsClient);
-    int reqLen = buf.remaining();
-    try (TraceScope scope =
-        dfsClient.newReaderTraceScope("DFSInputStream#byteBufferRead",
-            src, getPos(), reqLen)){
-      int retLen = readWithStrategy(byteBufferReader);
-      if (retLen < reqLen) {
-        dfsClient.addRetLenToReaderScope(scope, retLen);
-      }
-      return retLen;
-    }
+    return readWithStrategy(byteBufferReader);
   }
 
   private DNAddrPair chooseDataNode(LocatedBlock block,
@@ -1025,16 +1005,12 @@ private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
       final ByteBuffer bb,
       final CorruptedBlocks corruptedBlocks,
       final int hedgedReadId) {
-    final SpanId parentSpanId = Tracer.getCurrentSpanId();
     return new Callable<ByteBuffer>() {
       @Override
       public ByteBuffer call() throws Exception {
        DFSClientFaultInjector.get().sleepBeforeHedgedGet();
-        try (TraceScope ignored = dfsClient.getTracer().
-            newScope("hedgedRead" + hedgedReadId, parentSpanId)) {
-          actualGetFromOneDataNode(datanode, start, end, bb, corruptedBlocks);
-          return bb;
-        }
+        actualGetFromOneDataNode(datanode, start, end, bb, corruptedBlocks);
+        return bb;
       }
     };
   }
@@ -1332,16 +1308,8 @@ public int read(long position, byte[] buffer, int offset, int length)
     if (length == 0) {
       return 0;
     }
-    try (TraceScope scope = dfsClient.
-        newReaderTraceScope("DFSInputStream#byteArrayPread",
-            src, position, length)) {
-      ByteBuffer bb = ByteBuffer.wrap(buffer, offset, length);
-      int retLen = pread(position, bb);
-      if (retLen < length) {
-        dfsClient.addRetLenToReaderScope(scope, retLen);
-      }
-      return retLen;
-    }
+    ByteBuffer bb = ByteBuffer.wrap(buffer, offset, length);
+    return pread(position, bb);
   }
 
   private int pread(long position, ByteBuffer buffer)
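
After this change both read variants reduce to a direct delegation. Reassembled from the '+' lines above (not new code), the ByteBuffer path now reads:

  @Override
  public synchronized int read(final ByteBuffer buf) throws IOException {
    ReaderStrategy byteBufferReader =
        new ByteBufferStrategy(buf, readStatistics, dfsClient);
    // No TraceScope wrapper any more: hand off directly to the read strategy.
    return readWithStrategy(byteBufferReader);
  }

The positional read collapses the same way, wrapping the destination array in a ByteBuffer and delegating to pread.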

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
Lines changed: 2 additions & 14 deletions

@@ -75,7 +75,6 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.htrace.core.Tracer;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -189,11 +188,6 @@ public boolean getSupportsReceiptVerification() {
    */
   private Configuration configuration;
 
-  /**
-   * The HTrace tracer to use.
-   */
-  private Tracer tracer;
-
   /**
    * Information about the domain socket path we should use to connect to the
    * local peer-- or null if we haven't examined the local domain socket.
@@ -298,11 +292,6 @@ public BlockReaderFactory setConfiguration(
     return this;
   }
 
-  public BlockReaderFactory setTracer(Tracer tracer) {
-    this.tracer = tracer;
-    return this;
-  }
-
   @VisibleForTesting
   public static void setFailureInjectorForTesting(FailureInjector injector) {
     failureInjector = injector;
@@ -447,7 +436,7 @@ private BlockReader getLegacyBlockReaderLocal() throws IOException {
     try {
       return BlockReaderLocalLegacy.newBlockReader(conf,
           userGroupInformation, configuration, fileName, block, token,
-          datanode, startOffset, length, storageType, tracer);
+          datanode, startOffset, length, storageType);
     } catch (RemoteException remoteException) {
       ioe = remoteException.unwrapRemoteException(
           InvalidToken.class, AccessControlException.class);
@@ -505,7 +494,6 @@ private BlockReader getBlockReaderLocal() throws InvalidToken {
         setVerifyChecksum(verifyChecksum).
         setCachingStrategy(cachingStrategy).
         setStorageType(storageType).
-        setTracer(tracer).
         build();
   }
 
@@ -856,7 +844,7 @@ private BlockReader getRemoteBlockReader(Peer peer) throws IOException {
     return BlockReaderRemote.newBlockReader(
         fileName, block, token, startOffset, length,
         verifyChecksum, clientName, peer, datanode,
-        clientContext.getPeerCache(), cachingStrategy, tracer,
+        clientContext.getPeerCache(), cachingStrategy,
         networkDistance);
   }
 
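
The two call-site edits above imply matching signature changes in BlockReaderLocalLegacy and BlockReaderRemote, which are presumably among the 12 changed files even though their diffs are not shown here. Inferred from the call site only, the remote factory method would now look roughly as follows; the parameter types are our best guess, not text quoted from the patch:

  // Rough shape only -- inferred from the call site, not copied from the patch.
  public static BlockReader newBlockReader(String file, ExtendedBlock block,
      Token<BlockTokenIdentifier> blockToken, long startOffset, long length,
      boolean verifyChecksum, String clientName, Peer peer,
      DatanodeID datanodeID, PeerCache peerCache,
      CachingStrategy cachingStrategy, int networkDistance) throws IOException {
    // ... body as before, minus the Tracer it used to capture ...
  }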

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java
Lines changed: 38 additions & 55 deletions

@@ -35,8 +35,6 @@
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.util.Timer;
-import org.apache.htrace.core.TraceScope;
-import org.apache.htrace.core.Tracer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -83,7 +81,6 @@ public static class Builder {
     private long dataPos;
     private ExtendedBlock block;
     private StorageType storageType;
-    private Tracer tracer;
     private ShortCircuitConf shortCircuitConf;
 
     public Builder(ShortCircuitConf conf) {
@@ -131,11 +128,6 @@ public Builder setStorageType(StorageType storageType) {
       return this;
     }
 
-    public Builder setTracer(Tracer tracer) {
-      this.tracer = tracer;
-      return this;
-    }
-
     public BlockReaderLocal build() {
       Preconditions.checkNotNull(replica);
       return new BlockReaderLocal(this);
@@ -244,11 +236,6 @@ public BlockReaderLocal build() {
    */
   private StorageType storageType;
 
-  /**
-   * The Tracer to use.
-   */
-  private final Tracer tracer;
-
   private BlockReaderLocal(Builder builder) {
     this.replica = builder.replica;
     this.dataIn = replica.getDataStream().getChannel();
@@ -278,7 +265,6 @@ private BlockReaderLocal(Builder builder) {
     }
     this.maxReadaheadLength = maxReadaheadChunks * bytesPerChecksum;
     this.storageType = builder.storageType;
-    this.tracer = builder.tracer;
 
     if (builder.shortCircuitConf.isScrMetricsEnabled()) {
       metricsInitializationLock.lock();
@@ -360,52 +346,49 @@ private synchronized int drainDataBuf(ByteBuffer buf) {
    */
   private synchronized int fillBuffer(ByteBuffer buf, boolean canSkipChecksum)
       throws IOException {
-    try (TraceScope ignored = tracer.newScope(
-        "BlockReaderLocal#fillBuffer(" + block.getBlockId() + ")")) {
-      int total = 0;
-      long startDataPos = dataPos;
-      int startBufPos = buf.position();
-      while (buf.hasRemaining()) {
-        int nRead = blockReaderIoProvider.read(dataIn, buf, dataPos);
-        if (nRead < 0) {
-          break;
-        }
-        dataPos += nRead;
-        total += nRead;
-      }
-      if (canSkipChecksum) {
-        freeChecksumBufIfExists();
-        return total;
+    int total = 0;
+    long startDataPos = dataPos;
+    int startBufPos = buf.position();
+    while (buf.hasRemaining()) {
+      int nRead = blockReaderIoProvider.read(dataIn, buf, dataPos);
+      if (nRead < 0) {
+        break;
       }
-      if (total > 0) {
-        try {
-          buf.limit(buf.position());
-          buf.position(startBufPos);
-          createChecksumBufIfNeeded();
-          int checksumsNeeded = (total + bytesPerChecksum - 1) /
-              bytesPerChecksum;
-          checksumBuf.clear();
-          checksumBuf.limit(checksumsNeeded * checksumSize);
-          long checksumPos = BlockMetadataHeader.getHeaderSize()
-              + ((startDataPos / bytesPerChecksum) * checksumSize);
-          while (checksumBuf.hasRemaining()) {
-            int nRead = checksumIn.read(checksumBuf, checksumPos);
-            if (nRead < 0) {
-              throw new IOException("Got unexpected checksum file EOF at " +
-                  checksumPos + ", block file position " + startDataPos +
-                  " for block " + block + " of file " + filename);
-            }
-            checksumPos += nRead;
+      dataPos += nRead;
+      total += nRead;
+    }
+    if (canSkipChecksum) {
+      freeChecksumBufIfExists();
+      return total;
+    }
+    if (total > 0) {
+      try {
+        buf.limit(buf.position());
+        buf.position(startBufPos);
+        createChecksumBufIfNeeded();
+        int checksumsNeeded = (total + bytesPerChecksum - 1) /
+            bytesPerChecksum;
+        checksumBuf.clear();
+        checksumBuf.limit(checksumsNeeded * checksumSize);
+        long checksumPos = BlockMetadataHeader.getHeaderSize()
+            + ((startDataPos / bytesPerChecksum) * checksumSize);
+        while (checksumBuf.hasRemaining()) {
+          int nRead = checksumIn.read(checksumBuf, checksumPos);
+          if (nRead < 0) {
+            throw new IOException("Got unexpected checksum file EOF at " +
+                checksumPos + ", block file position " + startDataPos +
+                " for block " + block + " of file " + filename);
           }
-          checksumBuf.flip();
-
-          checksum.verifyChunkedSums(buf, checksumBuf, filename, startDataPos);
-        } finally {
-          buf.position(buf.limit());
+          checksumPos += nRead;
         }
+        checksumBuf.flip();
+
+        checksum.verifyChunkedSums(buf, checksumBuf, filename, startDataPos);
+      } finally {
+        buf.position(buf.limit());
       }
-      return total;
     }
+    return total;
   }
 
   private boolean createNoChecksumContext() {