Skip to content

Commit e8bb682

Browse files
committed
Fix unittest and more cleanup.
1 parent 26ef13f commit e8bb682

File tree

8 files changed

+11
-110
lines changed

8 files changed

+11
-110
lines changed

src/mongo/db/concurrency/lock_manager.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -73,10 +73,10 @@ class LockManager {
7373
* return value is not LOCK_WAITING, this pointer can be freed and will
7474
* not be used any more.
7575
*
76-
* If the return value is LOCK_WAITING, the notification method will be
77-
* called at some point into the future, when the lock either becomes
78-
* granted or a deadlock is discovered. If unlock is called before the
79-
* lock becomes granted, the notification will not be invoked.
76+
* If the return value is LOCK_WAITING, the notification method will be called
77+
* at some point into the future, when the lock becomes granted. If unlock is
78+
* called before the lock becomes granted, the notification will not be
79+
* invoked.
8080
*
8181
* If the return value is LOCK_WAITING, the notification object *must*
8282
* live at least until the notify method has been invoked or unlock has

src/mongo/db/concurrency/lock_manager_defs.h

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -129,15 +129,6 @@ enum LockResult {
129129
*/
130130
LOCK_TIMEOUT,
131131

132-
/**
133-
* The lock request was not granted because it would result in a deadlock. No changes to
134-
* the state of the Locker would be made if this value is returned (i.e., it will not be
135-
* killed due to deadlock). It is up to the caller to decide how to recover from this
136-
* return value - could be either release some locks and try again, or just bail with an
137-
* error and have some upper code handle it.
138-
*/
139-
LOCK_DEADLOCK,
140-
141132
/**
142133
* This is used as an initializer value. Should never be returned.
143134
*/

src/mongo/db/concurrency/lock_state.cpp

Lines changed: 4 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -71,10 +71,6 @@ class PartitionedInstanceWideLockStats {
7171
_get(id).recordWaitTime(resId, mode, waitMicros);
7272
}
7373

74-
void recordDeadlock(ResourceId resId, LockMode mode) {
75-
_get(resId).recordDeadlock(resId, mode);
76-
}
77-
7874
void report(SingleThreadedLockStats* outStats) const {
7975
for (int i = 0; i < NumPartitions; i++) {
8076
outStats->append(_partitions[i].stats);
@@ -114,7 +110,7 @@ LockManager globalLockManager;
114110
const ResourceId resourceIdGlobal = ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
115111

116112
// How often (in millis) to check for deadlock if a lock has not been granted for some time
117-
const Milliseconds DeadlockTimeout = Milliseconds(500);
113+
const Milliseconds MaxWaitTime = Milliseconds(500);
118114

119115
// Dispenses unique LockerId identifiers
120116
AtomicUInt64 idCounter(0);
@@ -361,8 +357,6 @@ LockResult LockerImpl::_lockGlobalBegin(OperationContext* opCtx, LockMode mode,
361357
if (result == LOCK_OK)
362358
return LOCK_OK;
363359

364-
// Currently, deadlock detection does not happen inline with lock acquisition so the only
365-
// unsuccessful result that the lock manager would return is LOCK_WAITING.
366360
invariant(result == LOCK_WAITING);
367361

368362
return result;
@@ -433,8 +427,6 @@ LockResult LockerImpl::lock(OperationContext* opCtx,
433427
if (result == LOCK_OK)
434428
return LOCK_OK;
435429

436-
// Currently, deadlock detection does not happen inline with lock acquisition so the only
437-
// unsuccessful result that the lock manager would return is LOCK_WAITING.
438430
invariant(result == LOCK_WAITING);
439431

440432
return lockComplete(opCtx, resId, mode, deadline);
@@ -758,9 +750,8 @@ LockResult LockerImpl::lockComplete(OperationContext* opCtx,
758750
timeout = std::min(timeout, _maxLockTimeout.get());
759751
}
760752

761-
// Don't go sleeping without bound in order to be able to report long waits or wake up for
762-
// deadlock detection.
763-
Milliseconds waitTime = std::min(timeout, DeadlockTimeout);
753+
// Don't go sleeping without bound in order to be able to report long waits.
754+
Milliseconds waitTime = std::min(timeout, MaxWaitTime);
764755
const uint64_t startOfTotalWaitTime = curTimeMicros64();
765756
uint64_t startOfCurrentWaitTime = startOfTotalWaitTime;
766757

@@ -800,7 +791,7 @@ LockResult LockerImpl::lockComplete(OperationContext* opCtx,
800791

801792
const auto totalBlockTime = duration_cast<Milliseconds>(
802793
Microseconds(int64_t(curTimeMicros - startOfTotalWaitTime)));
803-
waitTime = (totalBlockTime < timeout) ? std::min(timeout - totalBlockTime, DeadlockTimeout)
794+
waitTime = (totalBlockTime < timeout) ? std::min(timeout - totalBlockTime, MaxWaitTime)
804795
: Milliseconds(0);
805796

806797
if (waitTime == Milliseconds(0)) {

src/mongo/db/concurrency/lock_state.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -231,7 +231,6 @@ class LockerImpl : public Locker {
231231
* @param mode Mode which was passed to an earlier lockBegin call. Must match.
232232
* @param deadline The absolute time point when this lock acquisition will time out, if not yet
233233
* granted.
234-
* @param checkDeadlock whether to perform deadlock detection while waiting.
235234
*/
236235
LockResult lockComplete(OperationContext* opCtx,
237236
ResourceId resId,

src/mongo/db/concurrency/lock_state_test.cpp

Lines changed: 1 addition & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -236,57 +236,6 @@ TEST(LockerImpl, DefaultLocker) {
236236
ASSERT(locker.unlockGlobal());
237237
}
238238

239-
TEST(LockerImpl, CanceledDeadlockUnblocks) {
240-
const ResourceId db1(RESOURCE_DATABASE, "db1"_sd);
241-
const ResourceId db2(RESOURCE_DATABASE, "db2"_sd);
242-
243-
LockerImpl locker1;
244-
LockerImpl locker2;
245-
LockerImpl locker3;
246-
247-
ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IX));
248-
ASSERT(LOCK_OK == locker1.lock(db1, MODE_S));
249-
250-
ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IX));
251-
ASSERT(LOCK_OK == locker2.lock(db2, MODE_X));
252-
253-
// Set up locker1 and locker2 for deadlock
254-
ASSERT(LOCK_WAITING == locker1.lockBegin(nullptr, db2, MODE_X));
255-
ASSERT(LOCK_WAITING == locker2.lockBegin(nullptr, db1, MODE_X));
256-
257-
// Locker3 blocks behind locker 2
258-
ASSERT(LOCK_OK == locker3.lockGlobal(MODE_IX));
259-
ASSERT(LOCK_WAITING == locker3.lockBegin(nullptr, db1, MODE_S));
260-
261-
// Detect deadlock, canceling our request
262-
ASSERT(
263-
LOCK_DEADLOCK ==
264-
locker2.lockComplete(db1, MODE_X, Date_t::now() + Milliseconds(1), /*checkDeadlock*/ true));
265-
266-
// Now locker3 must be able to complete its request
267-
ASSERT(LOCK_OK ==
268-
locker3.lockComplete(
269-
db1, MODE_S, Date_t::now() + Milliseconds(1), /*checkDeadlock*/ false));
270-
271-
// Locker1 still can't complete its request
272-
ASSERT(LOCK_TIMEOUT ==
273-
locker1.lockComplete(db2, MODE_X, Date_t::now() + Milliseconds(1), false));
274-
275-
// Check ownership for db1
276-
ASSERT(locker1.getLockMode(db1) == MODE_S);
277-
ASSERT(locker2.getLockMode(db1) == MODE_NONE);
278-
ASSERT(locker3.getLockMode(db1) == MODE_S);
279-
280-
// Check ownership for db2
281-
ASSERT(locker1.getLockMode(db2) == MODE_NONE);
282-
ASSERT(locker2.getLockMode(db2) == MODE_X);
283-
ASSERT(locker3.getLockMode(db2) == MODE_NONE);
284-
285-
ASSERT(locker1.unlockGlobal());
286-
ASSERT(locker2.unlockGlobal());
287-
ASSERT(locker3.unlockGlobal());
288-
}
289-
290239
TEST(LockerImpl, SharedLocksShouldTwoPhaseLockIsTrue) {
291240
// Test that when setSharedLocksShouldTwoPhaseLock is true and we are in a WUOW, unlock on IS
292241
// and S locks are postponed until endWriteUnitOfWork() is called. Mode IX and X locks always
@@ -526,9 +475,7 @@ TEST(LockerImpl, GetLockerInfoShouldReportPendingLocks) {
526475
ASSERT(successfulLocker.unlock(dbId));
527476
ASSERT(successfulLocker.unlockGlobal());
528477

529-
const bool checkDeadlock = false;
530-
ASSERT_EQ(LOCK_OK,
531-
conflictingLocker.lockComplete(collectionId, MODE_IS, Date_t::now(), checkDeadlock));
478+
ASSERT_EQ(LOCK_OK, conflictingLocker.lockComplete(collectionId, MODE_IS, Date_t::now()));
532479

533480
conflictingLocker.getLockerInfo(&lockerInfo, boost::none);
534481
ASSERT_FALSE(lockerInfo.waitingResource.isValid());

src/mongo/db/concurrency/lock_stats.cpp

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -115,24 +115,6 @@ void LockStats<CounterType>::_report(BSONObjBuilder* builder,
115115
}
116116
}
117117
}
118-
119-
// Deadlocks
120-
{
121-
std::unique_ptr<BSONObjBuilder> deadlockCount;
122-
for (int mode = 1; mode < LockModesCount; mode++) {
123-
const long long value = CounterOps::get(stat.modeStats[mode].numDeadlocks);
124-
if (value > 0) {
125-
if (!deadlockCount) {
126-
if (!section) {
127-
section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
128-
}
129-
130-
deadlockCount.reset(new BSONObjBuilder(section->subobjStart("deadlockCount")));
131-
}
132-
deadlockCount->append(legacyModeName(static_cast<LockMode>(mode)), value);
133-
}
134-
}
135-
}
136118
}
137119

138120
template <typename CounterType>

src/mongo/db/concurrency/lock_stats.h

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -82,29 +82,25 @@ struct LockStatCounters {
8282
CounterOps::add(numAcquisitions, other.numAcquisitions);
8383
CounterOps::add(numWaits, other.numWaits);
8484
CounterOps::add(combinedWaitTimeMicros, other.combinedWaitTimeMicros);
85-
CounterOps::add(numDeadlocks, other.numDeadlocks);
8685
}
8786

8887
template <typename OtherType>
8988
void subtract(const LockStatCounters<OtherType>& other) {
9089
CounterOps::add(numAcquisitions, -other.numAcquisitions);
9190
CounterOps::add(numWaits, -other.numWaits);
9291
CounterOps::add(combinedWaitTimeMicros, -other.combinedWaitTimeMicros);
93-
CounterOps::add(numDeadlocks, -other.numDeadlocks);
9492
}
9593

9694
void reset() {
9795
CounterOps::set(numAcquisitions, 0);
9896
CounterOps::set(numWaits, 0);
9997
CounterOps::set(combinedWaitTimeMicros, 0);
100-
CounterOps::set(numDeadlocks, 0);
10198
}
10299

103100

104101
CounterType numAcquisitions;
105102
CounterType numWaits;
106103
CounterType combinedWaitTimeMicros;
107-
CounterType numDeadlocks;
108104
};
109105

110106

@@ -135,10 +131,6 @@ class LockStats {
135131
CounterOps::add(get(resId, mode).combinedWaitTimeMicros, waitMicros);
136132
}
137133

138-
void recordDeadlock(ResourceId resId, LockMode mode) {
139-
CounterOps::add(get(resId, mode).numDeadlocks, 1);
140-
}
141-
142134
LockStatCountersType& get(ResourceId resId, LockMode mode) {
143135
if (resId == resourceIdOplog) {
144136
return _oplogStats.modeStats[mode];

src/mongo/db/concurrency/lock_stats_test.cpp

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -68,9 +68,8 @@ TEST(LockStats, Wait) {
6868
ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBegin(nullptr, resId, MODE_S));
6969

7070
// Sleep 1 millisecond so the wait time passes
71-
ASSERT_EQUALS(
72-
LOCK_TIMEOUT,
73-
lockerConflict.lockComplete(resId, MODE_S, Date_t::now() + Milliseconds(5), false));
71+
ASSERT_EQUALS(LOCK_TIMEOUT,
72+
lockerConflict.lockComplete(resId, MODE_S, Date_t::now() + Milliseconds(5)));
7473
}
7574

7675
// Make sure that the waits/blocks are non-zero

0 commit comments

Comments (0)