Commit a9d5e4b (2 parents: ef016cd + 71df136)

Branch for Apache Hadoop 2.0.0-alpha release.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2.0.0-alpha@1335805 13f79535-47bb-0310-9956-ffa450edef68

51 files changed: 443 additions & 155 deletions

hadoop-common-project/hadoop-common/CHANGES.txt

Lines changed: 2 additions & 0 deletions
@@ -416,6 +416,8 @@ Release 0.23.3 - UNRELEASED
     HADOOP-8327. distcpv2 and distcpv1 jars should not coexist (Dave Thompson
     via bobby)
 
+    HADOOP-8341. Fix or filter findbugs issues in hadoop-tools (bobby)
+
 Release 0.23.2 - UNRELEASED
 
   NEW FEATURES

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 3 additions & 0 deletions
@@ -500,6 +500,9 @@ Release 2.0.0 - UNRELEASED
     HDFS-3376. DFSClient fails to make connection to DN if there are many
     unusable cached sockets (todd)
 
+    HDFS-3157. Error in deleting block is keep on coming from DN even after
+    the block report and directory scanning has happened. (Ashish Singhi via umamahesh)
+
 BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

Lines changed: 4 additions & 2 deletions
@@ -1804,7 +1804,8 @@ private BlockToMarkCorrupt checkReplicaCorrupt(
     case COMPLETE:
     case COMMITTED:
       if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) {
-        return new BlockToMarkCorrupt(storedBlock,
+        return new BlockToMarkCorrupt(new BlockInfo(iblk, storedBlock
+            .getINode().getReplication()),
             "block is " + ucState + " and reported genstamp " +
             iblk.getGenerationStamp() + " does not match " +
             "genstamp in block map " + storedBlock.getGenerationStamp());
@@ -1824,7 +1825,8 @@ private BlockToMarkCorrupt checkReplicaCorrupt(
       if (!storedBlock.isComplete()) {
         return null; // not corrupt
       } else if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) {
-        return new BlockToMarkCorrupt(storedBlock,
+        return new BlockToMarkCorrupt(new BlockInfo(iblk, storedBlock
+            .getINode().getReplication()),
             "reported " + reportedState + " replica with genstamp " +
             iblk.getGenerationStamp() + " does not match COMPLETE block's " +
             "genstamp in block map " + storedBlock.getGenerationStamp());

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java

Whitespace-only changes.

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java

Lines changed: 104 additions & 0 deletions

@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test when RBW block is removed. Invalidation of the corrupted block happens
+ * and then the under replicated block gets replicated to the datanode.
+ */
+public class TestRBWBlockInvalidation {
+  private static NumberReplicas countReplicas(final FSNamesystem namesystem,
+      ExtendedBlock block) {
+    return namesystem.getBlockManager().countNodes(block.getLocalBlock());
+  }
+
+  /**
+   * Test when a block's replica is removed from RBW folder in one of the
+   * datanode, namenode should ask to invalidate that corrupted block and
+   * schedule replication for one more replica for that under replicated block.
+   */
+  @Test
+  public void testBlockInvalidationWhenRBWReplicaMissedInDN()
+      throws IOException, InterruptedException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
+        .build();
+    FSDataOutputStream out = null;
+    try {
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      FileSystem fs = cluster.getFileSystem();
+      Path testPath = new Path(MiniDFSCluster.getBaseDirectory(), "foo1");
+      out = fs.create(testPath, (short) 3);
+      out.writeBytes("HDFS-3157: " + testPath);
+      out.hsync();
+      String bpid = namesystem.getBlockPoolId();
+      ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath);
+      Block block = blk.getLocalBlock();
+      // Deleting partial block and its meta information from the RBW folder
+      // of first datanode.
+      DataNode dn = cluster.getDataNodes().get(0);
+      File blockFile = DataNodeTestUtils.getBlockFile(dn, bpid, block);
+      File metaFile = DataNodeTestUtils.getMetaFile(dn, bpid, block);
+      assertTrue("Could not delete the block file from the RBW folder",
+          blockFile.delete());
+      assertTrue("Could not delete the block meta file from the RBW folder",
+          metaFile.delete());
+      out.close();
+      assertEquals("The corrupt replica could not be invalidated", 0,
+          countReplicas(namesystem, blk).corruptReplicas());
+      /*
+       * Sleep for 3 seconds, for under replicated block to get replicated. As
+       * one second will be taken by ReplicationMonitor and one more second for
+       * invalidated block to get deleted from the datanode.
+       */
+      Thread.sleep(3000);
+      blk = DFSTestUtil.getFirstBlock(fs, testPath);
+      assertEquals("There should be three live replicas", 3,
+          countReplicas(namesystem, blk).liveReplicas());
+    } finally {
+      if (out != null) {
+        out.close();
+      }
+      cluster.shutdown();
+    }
+  }
+}
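
A usage note on the test above, hedged: with the standard Maven layout of this branch, the test should be runnable on its own from the hadoop-hdfs module with mvn test -Dtest=TestRBWBlockInvalidation (the invocation is an assumption about the build setup, not part of the commit). The fixed Thread.sleep(3000) leans on the configuration at the top of the test: with the block report interval at 300 ms and the heartbeat and replication intervals at 1 second, roughly one second goes to the ReplicationMonitor pass and one more to deleting the invalidated replica on the datanode, as the inline comment explains.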

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java

Lines changed: 5 additions & 0 deletions
@@ -136,6 +136,11 @@ public static File getBlockFile(DataNode dn, String bpid, Block b
       ) throws IOException {
     return FsDatasetTestUtil.getBlockFile(dn.getFSDataset(), bpid, b);
   }
+
+  public static File getMetaFile(DataNode dn, String bpid, Block b)
+      throws IOException {
+    return FsDatasetTestUtil.getMetaFile(dn.getFSDataset(), bpid, b);
+  }
 
   public static boolean unlinkBlock(DataNode dn, ExtendedBlock bk, int numLinks
       ) throws IOException {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java

Lines changed: 6 additions & 0 deletions
@@ -36,6 +36,12 @@ public static File getBlockFile(FsDatasetSpi<?> fsd, String bpid, Block b
       ) throws IOException {
     return ((FsDatasetImpl)fsd).getBlockFile(bpid, b);
   }
+
+  public static File getMetaFile(FsDatasetSpi<?> fsd, String bpid, Block b)
+      throws IOException {
+    return FsDatasetUtil.getMetaFile(getBlockFile(fsd, bpid, b), b
+        .getGenerationStamp());
+  }
 
   public static boolean unlinkBlock(FsDatasetSpi<?> fsd,
       ExtendedBlock block, int numLinks) throws IOException {
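
For context on what getMetaFile resolves, a hedged sketch of the on-disk relationship it relies on (the helper below is illustrative, not part of this commit; the blk_<id>_<genstamp>.meta pattern is HDFS's naming convention of this era):

import java.io.File;

public class MetaFileNaming {
  // Illustrative: the meta file sits beside the block file and appends the
  // generation stamp plus the ".meta" extension to the block file's name.
  static File metaFileFor(File blockFile, long genStamp) {
    return new File(blockFile.getParentFile(),
        blockFile.getName() + "_" + genStamp + ".meta");
  }

  public static void main(String[] args) {
    File block = new File("/data/current/rbw/blk_42");
    // Prints /data/current/rbw/blk_42_1001.meta
    System.out.println(metaFileFor(block, 1001L));
  }
}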

hadoop-mapreduce-project/CHANGES.txt

Lines changed: 11 additions & 0 deletions
@@ -175,6 +175,9 @@ Release 2.0.0 - UNRELEASED
     MAPREDUCE-3958. RM: Remove RMNodeState and replace it with NodeState
     (Bikas Saha via bobby)
 
+    MAPREDUCE-4231. Update RAID to use the new BlockCollection interface.
+    (szetszwo)
+
 Release 0.23.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -212,8 +215,13 @@ Release 0.23.3 - UNRELEASED
 
     MAPREDUCE-4210. Expose listener address for WebApp (Daryn Sharp via bobby)
 
+    MAPREDUCE-4162. Correctly set token service (Daryn Sharp via bobby)
+
   OPTIMIZATIONS
 
+    MAPREDUCE-3850. Avoid redundant calls for tokens in TokenCache (Daryn
+    Sharp via bobby)
+
   BUG FIXES
 
     MAPREDUCE-4092. commitJob Exception does not fail job (Jon Eagles via
@@ -355,6 +363,9 @@ Release 0.23.3 - UNRELEASED
     MAPREDUCE-4226. ConcurrentModificationException in FileSystemCounterGroup.
     (tomwhite)
 
+    MAPREDUCE-4215. RM app page shows 500 error on appid parse error
+    (Jonathon Eagles via tgraves)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java

Lines changed: 5 additions & 3 deletions
@@ -50,7 +50,9 @@
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -77,7 +79,8 @@ public static void main(String[] args) throws Throwable {
 
     String host = args[0];
     int port = Integer.parseInt(args[1]);
-    final InetSocketAddress address = new InetSocketAddress(host, port);
+    final InetSocketAddress address =
+        NetUtils.createSocketAddrForHost(host, port);
     final TaskAttemptID firstTaskid = TaskAttemptID.forName(args[2]);
     int jvmIdInt = Integer.parseInt(args[3]);
     JVMId jvmId = new JVMId(firstTaskid.getJobID(),
@@ -214,8 +217,7 @@ private static Token<JobTokenIdentifier> loadCredentials(JobConf conf,
     LOG.debug("loading token. # keys =" +credentials.numberOfSecretKeys() +
         "; from file=" + jobTokenFile);
     Token<JobTokenIdentifier> jt = TokenCache.getJobToken(credentials);
-    jt.setService(new Text(address.getAddress().getHostAddress() + ":"
-        + address.getPort()));
+    SecurityUtil.setTokenService(jt, address);
     UserGroupInformation current = UserGroupInformation.getCurrentUser();
     current.addToken(jt);
     for (Token<? extends TokenIdentifier> tok : credentials.getAllTokens()) {
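
The YarnChild changes are the MAPREDUCE-4162 fix listed in CHANGES.txt above: the job token's service is now set through SecurityUtil instead of a hand-concatenated ip:port string. A hedged sketch of the pattern (the class and method names below are illustrative; the two Hadoop calls are the ones the diff itself uses):

import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenServiceExample {
  // Derive the token's service name from the socket address in one canonical
  // place instead of each caller building "ip:port" strings by hand.
  static <T extends TokenIdentifier> void bindService(Token<T> token,
      String host, int port) {
    InetSocketAddress addr = NetUtils.createSocketAddrForHost(host, port);
    // Replaces the old hand-built form from the removed lines:
    //   token.setService(new Text(addr.getAddress().getHostAddress()
    //       + ":" + addr.getPort()));
    SecurityUtil.setTokenService(token, addr);
  }
}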

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java

Lines changed: 5 additions & 0 deletions
@@ -180,6 +180,11 @@ class MRClientProtocolHandler implements MRClientProtocol {
     private RecordFactory recordFactory =
         RecordFactoryProvider.getRecordFactory(null);
 
+    @Override
+    public InetSocketAddress getConnectAddress() {
+      return getBindAddress();
+    }
+
     private Job verifyAndGetJob(JobId jobID,
         boolean modifyAccess) throws YarnRemoteException {
       Job job = appContext.getJob(jobID);
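
A hedged note on the new override (the rationale is inferred from the MAPREDUCE-4210 "Expose listener address" entry above, not stated in this hunk): a service bound to an ephemeral port only knows its real address once the listener is up, so exposing the live bind address lets callers reach the actual endpoint. A minimal illustration with a plain ServerSocket (standard Java, not the MRClientProtocol API):

import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class EphemeralPortExample {
  public static void main(String[] args) throws Exception {
    // Bind to port 0 and let the OS pick a free port.
    try (ServerSocket server = new ServerSocket(0)) {
      // Only the running listener knows the real port, so the service has
      // to expose it, as getConnectAddress() does above.
      InetSocketAddress actual =
          new InetSocketAddress("localhost", server.getLocalPort());
      System.out.println("listening on " + actual);
    }
  }
}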
