
Commit 0214774

SERVER-15704, SERVER-17071: Use resetMyLastApplied and re-enable resync.js
1 parent 52cb584 commit 0214774

7 files changed: +37 −10 lines


jstests/replsets/resync.js

Lines changed: 19 additions & 8 deletions
@@ -1,8 +1,8 @@
 // test that the resync command works with replica sets and that one does not need to manually
 // force a replica set resync by deleting all datafiles
-
-// TODO: Remove once test is fixed for SERVER-15704, add "use strict"
-if (false) {
+// Also tests that you can do this from a node that is "too stale"
+(function() {
+"use strict";
 var replTest = new ReplSetTest({name: 'resync', nodes: 3, oplogSize: 1});
 var nodes = replTest.nodeList();

@@ -29,17 +29,27 @@ if (false) {

 // create an oplog entry with an insert
 assert.writeOK( A.foo.insert({ x: 1 }, { writeConcern: { w: 3, wtimeout: 60000 }}));
+assert.eq(B.foo.findOne().x, 1)
+
+// run resync and wait for it to happen
+assert.commandWorked(b_conn.getDB("admin").runCommand({resync:1}));
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
+
+assert.eq(B.foo.findOne().x, 1)
 replTest.stop(BID);

 function hasCycled() {
 var oplog = a_conn.getDB("local").oplog.rs;
-return oplog.find( { "o.x" : 1 } ).sort( { $natural : 1 } )._addSpecial( "$maxScan" , 10 ).itcount() == 0;
+return oplog.find( { "o.x" : 1 } ).sort( { $natural : 1 } ).limit(10).itcount() == 0;
 }

+// Make sure the oplog has rolled over on the primary and secondary that is up,
+// so when we bring up the other replica it is "too stale"
 for ( var cycleNumber = 0; cycleNumber < 10; cycleNumber++ ) {
 // insert enough to cycle oplog
 var bulk = A.foo.initializeUnorderedBulkOp();
-for (i=2; i < 10000; i++) {
+for (var i=2; i < 10000; i++) {
 bulk.insert({x:i});
 }

@@ -52,10 +62,9 @@ if (false) {

 assert( hasCycled() );

-
-
 // bring node B and it will enter recovery mode because its newest oplog entry is too old
 replTest.restart(BID);
+
 // check that it is in recovery mode
 assert.soon(function() {
 try {
@@ -71,6 +80,8 @@ if (false) {
 assert.commandWorked(b_conn.getDB("admin").runCommand({resync:1}));
 replTest.awaitReplication();
 replTest.awaitSecondaryNodes();
+assert.eq(B.foo.findOne().x, 1)

 replTest.stopSet(15);
-}
+jsTest.log("success");
+})();

src/mongo/db/repl/replication_coordinator.h

Lines changed: 5 additions & 0 deletions
@@ -257,6 +257,11 @@ namespace repl {
 */
 virtual void setMyLastOptime(const OpTime& ts) = 0;

+/**
+ * Same as above, but used during places we need to zero our last optime.
+ */
+virtual void resetMyLastOptime() = 0;
+
 /**
  * Updates our the message we include in heartbeat responses.
  */

src/mongo/db/repl/replication_coordinator_impl.cpp

Lines changed: 5 additions & 0 deletions
@@ -686,6 +686,11 @@ namespace {
 _setMyLastOptime_inlock(&lock, ts, false);
 }

+void ReplicationCoordinatorImpl::resetMyLastOptime() {
+boost::unique_lock<boost::mutex> lock(_mutex);
+_setMyLastOptime_inlock(&lock, OpTime(), true);
+}
+
 void ReplicationCoordinatorImpl::_setMyLastOptime_inlock(
 boost::unique_lock<boost::mutex>* lock, const OpTime& ts, bool isRollbackAllowed) {
 invariant(lock->owns_lock());
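
The new method funnels into the same _setMyLastOptime_inlock helper as setMyLastOptime, but passes a default-constructed OpTime() with isRollbackAllowed set to true, so the stored optime is permitted to move backwards to zero. Below is a minimal, self-contained sketch of that pattern; the Coordinator class, its mutex handling, and the assertion are illustrative assumptions, not the actual MongoDB implementation.

#include <cassert>
#include <mutex>

// Illustrative stand-in for repl::OpTime: ordered, default-constructed as "zero".
struct OpTime {
    long long ts;
    explicit OpTime(long long t = 0) : ts(t) {}
    bool operator<(const OpTime& rhs) const { return ts < rhs.ts; }
};

// Hypothetical coordinator showing why resetting needs its own entry point:
// the ordinary setter refuses to move the optime backwards.
class Coordinator {
public:
    // Normal replication path: the last applied optime may only advance.
    void setMyLastOptime(const OpTime& ts) {
        std::lock_guard<std::mutex> lk(_mutex);
        _setMyLastOptime_inlock(ts, /*isRollbackAllowed=*/false);
    }

    // Reset path: explicitly allowed to fall back to the zero optime,
    // e.g. when initial sync starts over from scratch.
    void resetMyLastOptime() {
        std::lock_guard<std::mutex> lk(_mutex);
        _setMyLastOptime_inlock(OpTime(), /*isRollbackAllowed=*/true);
    }

private:
    void _setMyLastOptime_inlock(const OpTime& ts, bool isRollbackAllowed) {
        // Going backwards without the rollback flag would be a programming error.
        assert(isRollbackAllowed || !(ts < _myLastOptime));
        _myLastOptime = ts;
    }

    std::mutex _mutex;
    OpTime _myLastOptime;
};

int main() {
    Coordinator coord;
    coord.setMyLastOptime(OpTime(5));
    coord.resetMyLastOptime();       // allowed: reset may go back to zero
    coord.setMyLastOptime(OpTime(6));
    return 0;
}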

src/mongo/db/repl/replication_coordinator_impl.h

Lines changed: 2 additions & 0 deletions
@@ -137,6 +137,8 @@ namespace repl {

 virtual void setMyLastOptime(const OpTime& ts);

+virtual void resetMyLastOptime();
+
 virtual void setMyHeartbeatMessage(const std::string& msg);

 virtual OpTime getMyLastOptime() const;

src/mongo/db/repl/replication_coordinator_mock.cpp

Lines changed: 2 additions & 0 deletions
@@ -132,6 +132,8 @@ namespace repl {

 void ReplicationCoordinatorMock::setMyLastOptime(const OpTime& ts) {}

+void ReplicationCoordinatorMock::resetMyLastOptime() {}
+
 OpTime ReplicationCoordinatorMock::getMyLastOptime() const {
 // TODO
 return OpTime();

src/mongo/db/repl/replication_coordinator_mock.h

Lines changed: 2 additions & 0 deletions
@@ -95,6 +95,8 @@ namespace repl {

 virtual void setMyLastOptime(const OpTime& ts);

+virtual void resetMyLastOptime();
+
 virtual void setMyHeartbeatMessage(const std::string& msg);

 virtual OpTime getMyLastOptime() const;

src/mongo/db/repl/rs_initialsync.cpp

Lines changed: 2 additions & 2 deletions
@@ -78,7 +78,7 @@ namespace {
 // via stop().
 // We must clear the sync source blacklist after calling stop()
 // because the bgsync thread, while running, may update the blacklist.
-replCoord->setMyLastOptime(OpTime());
+replCoord->resetMyLastOptime();
 bgsync->stop();
 replCoord->clearSyncSourceBlacklist();

@@ -194,7 +194,7 @@ namespace {
 catch (const DBException&) {
 log() << "replSet initial sync failed during oplog application phase, and will retry";

-getGlobalReplicationCoordinator()->setMyLastOptime(OpTime());
+getGlobalReplicationCoordinator()->resetMyLastOptime();
 BackgroundSync::get()->setLastAppliedHash(0);

 sleepsecs(5);
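
In this retry path, zeroing the node's advertised optime before sleeping and retrying keeps a failed initial-sync attempt from advertising progress it no longer has. Below is a rough, self-contained sketch of that control flow only; the SyncHooks struct, its callbacks, and the fixed 5-second backoff are assumptions made for illustration, not the real initial-sync machinery.

#include <chrono>
#include <functional>
#include <iostream>
#include <thread>

// Hypothetical hooks standing in for the replication coordinator and the
// initial-sync attempt; only the retry shape mirrors rs_initialsync.cpp.
struct SyncHooks {
    std::function<void()> resetMyLastOptime;   // zero the advertised last optime
    std::function<bool()> attemptInitialSync;  // one full initial-sync attempt
};

void runInitialSync(const SyncHooks& hooks) {
    // Start from a clean slate, as the first hunk above does before stopping
    // bgsync and clearing the sync source blacklist.
    hooks.resetMyLastOptime();
    while (!hooks.attemptInitialSync()) {
        std::cout << "initial sync failed during oplog application, will retry\n";
        // Drop back to the zero optime so the node does not claim progress it
        // no longer has, then wait before the next attempt.
        hooks.resetMyLastOptime();
        std::this_thread::sleep_for(std::chrono::seconds(5));
    }
}

int main() {
    int attempts = 0;
    SyncHooks hooks;
    hooks.resetMyLastOptime = [] { std::cout << "optime reset to zero\n"; };
    hooks.attemptInitialSync = [&attempts] { return ++attempts >= 2; };  // fail once, then succeed
    runInitialSync(hooks);
    return 0;
}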
