Skip to content

Commit 652cfa1

Browse files
Author: Brandon Li (committed)
HDFS-5252. Stable write is not handled correctly in someplace. Contributed by Brandon Li
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1539740 13f79535-47bb-0310-9956-ffa450edef68
1 parent 3abce59 commit 652cfa1

File tree

7 files changed

+189
-1
lines changed

7 files changed

+189
-1
lines changed

hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,11 @@
1919

2020
import java.io.IOException;
2121

22+
import org.apache.hadoop.nfs.nfs3.FileHandle;
2223
import org.apache.hadoop.oncrpc.XDR;
2324

25+
import com.google.common.annotations.VisibleForTesting;
26+
2427
/**
2528
* READ3 Request
2629
*/
@@ -34,11 +37,25 @@ public READ3Request(XDR xdr) throws IOException {
3437
count = xdr.readInt();
3538
}
3639

40+
@VisibleForTesting
41+
public READ3Request(FileHandle handle, long offset, int count) {
42+
super(handle);
43+
this.offset = offset;
44+
this.count = count;
45+
}
46+
3747
public long getOffset() {
3848
return this.offset;
3949
}
4050

4151
public int getCount() {
4252
return this.count;
4353
}
54+
55+
@Override
56+
public void serialize(XDR xdr) {
57+
handle.serialize(xdr);
58+
xdr.writeLongAsHyper(offset);
59+
xdr.writeInt(count);
60+
}
4461
}

hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,12 @@ public static WccData createWccData(final WccAttr preOpAttr,
109109
* Send a write response to the netty network socket channel
110110
*/
111111
public static void writeChannel(Channel channel, XDR out, int xid) {
112+
if (channel == null) {
113+
RpcProgramNfs3.LOG
114+
.info("Null channel should only happen in tests. Do nothing.");
115+
return;
116+
}
117+
112118
if (RpcProgramNfs3.LOG.isDebugEnabled()) {
113119
RpcProgramNfs3.LOG.debug(WRITE_RPC_END + xid);
114120
}

hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1007,6 +1007,23 @@ private void doSingleWrite(final WriteCtx writeCtx) {
10071007
}
10081008

10091009
if (!writeCtx.getReplied()) {
1010+
if (stableHow != WriteStableHow.UNSTABLE) {
1011+
LOG.info("Do sync for stable write:" + writeCtx);
1012+
try {
1013+
if (stableHow == WriteStableHow.DATA_SYNC) {
1014+
fos.hsync();
1015+
} else {
1016+
Preconditions.checkState(stableHow == WriteStableHow.FILE_SYNC,
1017+
"Unknown WriteStableHow:" + stableHow);
1018+
// Sync file data and length
1019+
fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
1020+
}
1021+
} catch (IOException e) {
1022+
LOG.error("hsync failed with writeCtx:" + writeCtx + " error:" + e);
1023+
throw e;
1024+
}
1025+
}
1026+
10101027
WccAttr preOpAttr = latestAttr.getWccAttr();
10111028
WccData fileWcc = new WccData(preOpAttr, latestAttr);
10121029
if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) {

hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -126,6 +126,8 @@
126126
import org.jboss.netty.channel.Channel;
127127
import org.jboss.netty.channel.ChannelHandlerContext;
128128

129+
import com.google.common.annotations.VisibleForTesting;
130+
129131
/**
130132
* RPC program corresponding to nfs daemon. See {@link Nfs3}.
131133
*/
@@ -1975,4 +1977,9 @@ private boolean checkAccessPrivilege(final InetAddress client,
19751977
}
19761978
return true;
19771979
}
1980+
1981+
@VisibleForTesting
1982+
WriteManager getWriteManager() {
1983+
return this.writeManager;
1984+
}
19781985
}

hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@
4545
import org.apache.hadoop.util.Daemon;
4646
import org.jboss.netty.channel.Channel;
4747

48+
import com.google.common.annotations.VisibleForTesting;
4849
import com.google.common.collect.Maps;
4950

5051
/**
@@ -262,6 +263,11 @@ Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle dirHandle,
262263
}
263264
return attr;
264265
}
266+
267+
@VisibleForTesting
268+
ConcurrentMap<FileHandle, OpenFileCtx> getOpenFileMap() {
269+
return this.openFileMap;
270+
}
265271

266272
/**
267273
* StreamMonitor wakes up periodically to find and closes idle streams.

hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java

Lines changed: 134 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,21 +17,41 @@
1717
*/
1818
package org.apache.hadoop.hdfs.nfs.nfs3;
1919

20+
import static org.junit.Assert.assertTrue;
21+
import static org.junit.Assert.fail;
22+
2023
import java.io.IOException;
24+
import java.net.InetAddress;
2125
import java.nio.ByteBuffer;
26+
import java.util.ArrayList;
27+
import java.util.Arrays;
28+
import java.util.List;
29+
import java.util.concurrent.ConcurrentMap;
2230
import java.util.concurrent.ConcurrentNavigableMap;
2331

2432
import junit.framework.Assert;
2533

2634
import org.apache.hadoop.hdfs.DFSClient;
35+
import org.apache.hadoop.hdfs.HdfsConfiguration;
36+
import org.apache.hadoop.hdfs.MiniDFSCluster;
2737
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
2838
import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
2939
import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
40+
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
41+
import org.apache.hadoop.hdfs.server.namenode.NameNode;
3042
import org.apache.hadoop.nfs.nfs3.FileHandle;
3143
import org.apache.hadoop.nfs.nfs3.IdUserGroup;
44+
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
3245
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
3346
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
47+
import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
48+
import org.apache.hadoop.nfs.nfs3.request.READ3Request;
49+
import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
3450
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
51+
import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
52+
import org.apache.hadoop.nfs.nfs3.response.READ3Response;
53+
import org.apache.hadoop.oncrpc.XDR;
54+
import org.apache.hadoop.oncrpc.security.SecurityHandler;
3555
import org.junit.Test;
3656
import org.mockito.Mockito;
3757

@@ -105,7 +125,7 @@ public void testAlterWriteRequest() throws IOException {
105125
Assert.assertTrue(limit - position == 1);
106126
Assert.assertTrue(appendedData.get(position) == (byte) 19);
107127
}
108-
128+
109129
@Test
110130
// Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS, which
111131
// includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX,
@@ -162,4 +182,117 @@ public void testCheckCommit() throws IOException {
162182
ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
163183
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
164184
}
185+
186+
private void waitWrite(RpcProgramNfs3 nfsd, FileHandle handle, int maxWaitTime)
187+
throws InterruptedException {
188+
int waitedTime = 0;
189+
ConcurrentMap<FileHandle, OpenFileCtx> openFileMap = nfsd.getWriteManager()
190+
.getOpenFileMap();
191+
OpenFileCtx ctx = openFileMap.get(handle);
192+
assertTrue(ctx != null);
193+
do {
194+
Thread.sleep(3000);
195+
waitedTime += 3000;
196+
if (ctx.getPendingWritesForTest().size() == 0) {
197+
return;
198+
}
199+
} while (waitedTime < maxWaitTime);
200+
201+
fail("Write can't finish.");
202+
}
203+
204+
@Test
205+
public void testWriteStableHow() throws IOException, InterruptedException {
206+
HdfsConfiguration config = new HdfsConfiguration();
207+
DFSClient client = null;
208+
MiniDFSCluster cluster = null;
209+
RpcProgramNfs3 nfsd;
210+
SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
211+
Mockito.when(securityHandler.getUser()).thenReturn(
212+
System.getProperty("user.name"));
213+
214+
try {
215+
cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
216+
cluster.waitActive();
217+
client = new DFSClient(NameNode.getAddress(config), config);
218+
219+
// Start nfs
220+
List<String> exports = new ArrayList<String>();
221+
exports.add("/");
222+
Nfs3 nfs3 = new Nfs3(exports, config);
223+
nfs3.start(false);
224+
nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
225+
226+
HdfsFileStatus status = client.getFileInfo("/");
227+
FileHandle rootHandle = new FileHandle(status.getFileId());
228+
// Create file1
229+
CREATE3Request createReq = new CREATE3Request(rootHandle, "file1",
230+
Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
231+
XDR createXdr = new XDR();
232+
createReq.serialize(createXdr);
233+
CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
234+
securityHandler, InetAddress.getLocalHost());
235+
FileHandle handle = createRsp.getObjHandle();
236+
237+
// Test DATA_SYNC
238+
byte[] buffer = new byte[10];
239+
for (int i = 0; i < 10; i++) {
240+
buffer[i] = (byte) i;
241+
}
242+
WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
243+
WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
244+
XDR writeXdr = new XDR();
245+
writeReq.serialize(writeXdr);
246+
nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
247+
InetAddress.getLocalHost());
248+
249+
waitWrite(nfsd, handle, 60000);
250+
251+
// Readback
252+
READ3Request readReq = new READ3Request(handle, 0, 10);
253+
XDR readXdr = new XDR();
254+
readReq.serialize(readXdr);
255+
READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
256+
securityHandler, InetAddress.getLocalHost());
257+
258+
assertTrue(Arrays.equals(buffer, readRsp.getData().array()));
259+
260+
// Test FILE_SYNC
261+
262+
// Create file2
263+
CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2",
264+
Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
265+
XDR createXdr2 = new XDR();
266+
createReq2.serialize(createXdr2);
267+
CREATE3Response createRsp2 = nfsd.create(createXdr2.asReadOnlyWrap(),
268+
securityHandler, InetAddress.getLocalHost());
269+
FileHandle handle2 = createRsp2.getObjHandle();
270+
271+
WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10,
272+
WriteStableHow.FILE_SYNC, ByteBuffer.wrap(buffer));
273+
XDR writeXdr2 = new XDR();
274+
writeReq2.serialize(writeXdr2);
275+
nfsd.write(writeXdr2.asReadOnlyWrap(), null, 1, securityHandler,
276+
InetAddress.getLocalHost());
277+
278+
waitWrite(nfsd, handle2, 60000);
279+
280+
// Readback
281+
READ3Request readReq2 = new READ3Request(handle2, 0, 10);
282+
XDR readXdr2 = new XDR();
283+
readReq2.serialize(readXdr2);
284+
READ3Response readRsp2 = nfsd.read(readXdr2.asReadOnlyWrap(),
285+
securityHandler, InetAddress.getLocalHost());
286+
287+
assertTrue(Arrays.equals(buffer, readRsp2.getData().array()));
288+
// FILE_SYNC should sync the file size
289+
status = client.getFileInfo("/file2");
290+
assertTrue(status.getLen() == 10);
291+
292+
} finally {
293+
if (cluster != null) {
294+
cluster.shutdown();
295+
}
296+
}
297+
}
165298
}

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -588,6 +588,8 @@ Release 2.2.1 - UNRELEASED
588588
HDFS-5458. Datanode failed volume threshold ignored if exception is thrown
589589
in getDataDirsFromURIs. (Mike Mellenthin via wang)
590590

591+
HDFS-5252. Stable write is not handled correctly in someplace. (brandonli)
592+
591593
Release 2.2.0 - 2013-10-13
592594

593595
INCOMPATIBLE CHANGES

0 commit comments

Comments (0)