Skip to content

Commit ce81bab

Browse files
committed
HDFS-3573. Supply NamespaceInfo when instantiating JournalManagers. Contributed by Todd Lipcon.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1356388 13f79535-47bb-0310-9956-ffa450edef68
1 parent 075b376 commit ce81bab

File tree

9 files changed

+77
-33
lines changed

9 files changed

+77
-33
lines changed

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,8 @@ Trunk (unreleased changes)
102102
HDFS-3510. Editlog pre-allocation is performed prior to writing edits
103103
to avoid partial edits case disk out of space.(Colin McCabe via suresh)
104104

105+
HDFS-3573. Supply NamespaceInfo when instantiating JournalManagers (todd)
106+
105107
OPTIMIZATIONS
106108

107109
BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
2323
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
2424
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
25+
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
2526
import org.apache.hadoop.conf.Configuration;
2627

2728
import org.apache.bookkeeper.conf.ClientConfiguration;
@@ -158,11 +159,19 @@ private byte[] intToBytes(int i) {
158159
(byte)(i) };
159160
}
160161

162+
BookKeeperJournalManager(Configuration conf, URI uri) throws IOException {
163+
this(conf, uri, null);
164+
// TODO(ivank): update BookKeeperJournalManager to do something
165+
// with the NamespaceInfo. This constructor has been added
166+
// for compatibility with the old tests, and may be removed
167+
// when the tests are updated.
168+
}
169+
161170
/**
162171
* Construct a Bookkeeper journal manager.
163172
*/
164-
public BookKeeperJournalManager(Configuration conf, URI uri)
165-
throws IOException {
173+
public BookKeeperJournalManager(Configuration conf, URI uri,
174+
NamespaceInfo nsInfo) throws IOException {
166175
this.conf = conf;
167176
String zkConnect = uri.getAuthority().replace(";", ",");
168177
String zkPath = uri.getPath();

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,7 @@
6464
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
6565
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
6666
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
67+
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
6768
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
6869
import org.apache.hadoop.io.IOUtils;
6970
import org.apache.hadoop.security.token.delegation.DelegationKey;
@@ -324,7 +325,7 @@ synchronized void close() {
324325
endCurrentLogSegment(true);
325326
}
326327

327-
if (!journalSet.isEmpty()) {
328+
if (journalSet != null && !journalSet.isEmpty()) {
328329
try {
329330
journalSet.close();
330331
} catch (IOException ioe) {
@@ -1010,7 +1011,10 @@ public synchronized void purgeLogsOlderThan(final long minTxIdToKeep) {
10101011
minTxIdToKeep <= curSegmentTxId :
10111012
"cannot purge logs older than txid " + minTxIdToKeep +
10121013
" when current segment starts at " + curSegmentTxId;
1013-
1014+
if (minTxIdToKeep == 0) {
1015+
return;
1016+
}
1017+
10141018
// This could be improved to not need synchronization. But currently,
10151019
// journalSet is not threadsafe, so we need to synchronize this method.
10161020
try {
@@ -1260,8 +1264,9 @@ private JournalManager createJournal(URI uri) {
12601264

12611265
try {
12621266
Constructor<? extends JournalManager> cons
1263-
= clazz.getConstructor(Configuration.class, URI.class);
1264-
return cons.newInstance(conf, uri);
1267+
= clazz.getConstructor(Configuration.class, URI.class,
1268+
NamespaceInfo.class);
1269+
return cons.newInstance(conf, uri, storage.getNamespaceInfo());
12651270
} catch (Exception e) {
12661271
throw new IllegalArgumentException("Unable to construct journal, "
12671272
+ uri, e);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

Lines changed: 14 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -126,12 +126,6 @@ protected FSImage(Configuration conf,
126126
}
127127

128128
this.editLog = new FSEditLog(conf, storage, editsDirs);
129-
String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
130-
if (!HAUtil.isHAEnabled(conf, nameserviceId)) {
131-
editLog.initJournalsForWrite();
132-
} else {
133-
editLog.initSharedJournalsForRead();
134-
}
135129

136130
archivalManager = new NNStorageRetentionManager(conf, storage, editLog);
137131
}
@@ -511,6 +505,7 @@ void doImportCheckpoint(FSNamesystem target) throws IOException {
511505
// return back the real image
512506
realImage.getStorage().setStorageInfo(ckptImage.getStorage());
513507
realImage.getEditLog().setNextTxId(ckptImage.getEditLog().getLastWrittenTxId()+1);
508+
realImage.initEditLog();
514509

515510
target.dir.fsImage = realImage;
516511
realImage.getStorage().setBlockPoolID(ckptImage.getBlockPoolID());
@@ -584,10 +579,8 @@ boolean loadFSImage(FSNamesystem target, MetaRecoveryContext recovery)
584579

585580
Iterable<EditLogInputStream> editStreams = null;
586581

587-
if (editLog.isOpenForWrite()) {
588-
// We only want to recover streams if we're going into Active mode.
589-
editLog.recoverUnclosedStreams();
590-
}
582+
initEditLog();
583+
591584
if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT,
592585
getLayoutVersion())) {
593586
// If we're open for write, we're either non-HA or we're the active NN, so
@@ -645,6 +638,17 @@ boolean loadFSImage(FSNamesystem target, MetaRecoveryContext recovery)
645638
return needToSave;
646639
}
647640

641+
public void initEditLog() {
642+
Preconditions.checkState(getNamespaceID() != 0,
643+
"Must know namespace ID before initting edit log");
644+
String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
645+
if (!HAUtil.isHAEnabled(conf, nameserviceId)) {
646+
editLog.initJournalsForWrite();
647+
editLog.recoverUnclosedStreams();
648+
} else {
649+
editLog.initSharedJournalsForRead();
650+
}
651+
}
648652

649653
/**
650654
* @param imageFile the image file that was loaded

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -228,12 +228,6 @@ synchronized public void selectInputStreams(
228228
(inProgressOk ? " (inProgress ok) " : " (excluding inProgress) ") +
229229
"from among " + elfs.size() + " candidate file(s)");
230230
for (EditLogFile elf : elfs) {
231-
if (elf.lastTxId < fromTxId) {
232-
LOG.debug("passing over " + elf + " because it ends at " +
233-
elf.lastTxId + ", but we only care about transactions " +
234-
"as new as " + fromTxId);
235-
continue;
236-
}
237231
if (elf.isInProgress()) {
238232
if (!inProgressOk) {
239233
LOG.debug("passing over " + elf + " because it is in progress " +
@@ -248,6 +242,13 @@ synchronized public void selectInputStreams(
248242
continue;
249243
}
250244
}
245+
if (elf.lastTxId < fromTxId) {
246+
assert elf.lastTxId != HdfsConstants.INVALID_TXID;
247+
LOG.debug("passing over " + elf + " because it ends at " +
248+
elf.lastTxId + ", but we only care about transactions " +
249+
"as new as " + fromTxId);
250+
continue;
251+
}
251252
EditLogFileInputStream elfis = new EditLogFileInputStream(elf.getFile(),
252253
elf.getFirstTxId(), elf.getLastTxId(), elf.isInProgress());
253254
LOG.debug("selecting edit log stream " + elf);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1125,4 +1125,13 @@ FSImageStorageInspector readAndInspectDirs()
11251125
inspectStorageDirs(inspector);
11261126
return inspector;
11271127
}
1128+
1129+
public NamespaceInfo getNamespaceInfo() {
1130+
return new NamespaceInfo(
1131+
getNamespaceID(),
1132+
getClusterID(),
1133+
getBlockPoolID(),
1134+
getCTime(),
1135+
getDistributedUpgradeVersion());
1136+
}
11281137
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,6 @@
6767
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
6868
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
6969
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
70-
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
7170
import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;
7271
import org.apache.hadoop.io.IOUtils;
7372
import org.apache.hadoop.ipc.Server;
@@ -830,12 +829,7 @@ private static boolean initializeSharedEdits(Configuration conf,
830829
Lists.<URI>newArrayList(),
831830
sharedEditsDirs);
832831

833-
newSharedStorage.format(new NamespaceInfo(
834-
existingStorage.getNamespaceID(),
835-
existingStorage.getClusterID(),
836-
existingStorage.getBlockPoolID(),
837-
existingStorage.getCTime(),
838-
existingStorage.getDistributedUpgradeVersion()));
832+
newSharedStorage.format(existingStorage.getNamespaceInfo());
839833

840834
// Need to make sure the edit log segments are in good shape to initialize
841835
// the shared edits dir.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -188,6 +188,8 @@ private int doRun() throws IOException {
188188
// Load the newly formatted image, using all of the directories (including shared
189189
// edits)
190190
FSImage image = new FSImage(conf);
191+
image.getStorage().setStorageInfo(storage);
192+
image.initEditLog();
191193
assert image.getEditLog().isOpenForRead() :
192194
"Expected edit log to be open for read";
193195

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java

Lines changed: 22 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -24,12 +24,15 @@
2424
import org.apache.hadoop.hdfs.MiniDFSCluster;
2525
import org.apache.hadoop.conf.Configuration;
2626
import org.apache.hadoop.hdfs.DFSConfigKeys;
27+
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
2728

2829
import java.net.URI;
2930
import java.util.Collection;
3031
import java.io.IOException;
3132

3233
public class TestGenericJournalConf {
34+
private static final String DUMMY_URI = "dummy://test";
35+
3336
/**
3437
* Test that an exception is thrown if a journal class doesn't exist
3538
* in the configuration
@@ -114,12 +117,17 @@ public void testDummyJournalManager() throws Exception {
114117

115118
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
116119
DummyJournalManager.class.getName());
117-
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
118-
"dummy://test");
120+
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, DUMMY_URI);
119121
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, 0);
120122
try {
121123
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
122124
cluster.waitActive();
125+
126+
assertNotNull(DummyJournalManager.conf);
127+
assertEquals(new URI(DUMMY_URI), DummyJournalManager.uri);
128+
assertNotNull(DummyJournalManager.nsInfo);
129+
assertEquals(DummyJournalManager.nsInfo.getClusterID(),
130+
cluster.getNameNode().getNamesystem().getClusterId());
123131
} finally {
124132
if (cluster != null) {
125133
cluster.shutdown();
@@ -128,7 +136,17 @@ public void testDummyJournalManager() throws Exception {
128136
}
129137

130138
public static class DummyJournalManager implements JournalManager {
131-
public DummyJournalManager(Configuration conf, URI u) {}
139+
static Configuration conf = null;
140+
static URI uri = null;
141+
static NamespaceInfo nsInfo = null;
142+
143+
public DummyJournalManager(Configuration conf, URI u,
144+
NamespaceInfo nsInfo) {
145+
// Set static vars so the test case can verify them.
146+
DummyJournalManager.conf = conf;
147+
DummyJournalManager.uri = u;
148+
DummyJournalManager.nsInfo = nsInfo;
149+
}
132150

133151
@Override
134152
public EditLogOutputStream startLogSegment(long txId) throws IOException {
@@ -162,7 +180,7 @@ public void close() throws IOException {}
162180

163181
public static class BadConstructorJournalManager extends DummyJournalManager {
164182
public BadConstructorJournalManager() {
165-
super(null, null);
183+
super(null, null, null);
166184
}
167185
}
168186
}

0 commit comments

Comments (0)