2 | 2 | // Tests autosplitting heuristics, and that the heuristic counting of chunk sizes |
3 | 3 | // works as expected even after splitting. |
4 | 4 | // |
| 5 | +(function() { |
| 6 | + 'use strict'; |
5 | 7 |
6 | | -var st = |
7 | | - new ShardingTest({shards: 1, mongos: 1, other: {mongosOptions: {chunkSize: 1, verbose: 2}}}); |
| 8 | + var st = new ShardingTest({shards: 1, mongos: 1, other: {mongosOptions: {chunkSize: 1}}}); |
8 | 9 |
9 | | -// The balancer is by default stopped, thus it will NOT interfere unpredictably with the chunk |
10 | | -// moves/splits depending on the timing. |
| 10 | + // The balancer is by default stopped, thus it will NOT interfere unpredictably with the chunk |
| 11 | + // moves/splits depending on the timing. |
11 | 12 |
12 | | -// Test is not valid for debug build, heuristics get all mangled by debug reload behavior |
13 | | -var isDebugBuild = st.s0.getDB("admin").serverBuildInfo().debug; |
| 13 | + // Test is not valid for debug build, heuristics get all mangled by debug reload behavior |
| 14 | + var isDebugBuild = st.s0.getDB("admin").serverBuildInfo().debug; |
14 | 15 |
15 | | -if (!isDebugBuild) { |
16 | | - var mongos = st.s0; |
17 | | - var config = mongos.getDB("config"); |
18 | | - var admin = mongos.getDB("admin"); |
19 | | - var coll = mongos.getCollection("foo.hashBar"); |
| 16 | + if (!isDebugBuild) { |
| 17 | + var mongos = st.s0; |
| 18 | + var config = mongos.getDB("config"); |
| 19 | + var admin = mongos.getDB("admin"); |
| 20 | + var coll = mongos.getCollection("foo.hashBar"); |
20 | 21 |
21 | | - printjson(admin.runCommand({enableSharding: coll.getDB() + ""})); |
22 | | - printjson(admin.runCommand({shardCollection: coll + "", key: {_id: 1}})); |
| 22 | + assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""})); |
| 23 | + assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}})); |
23 | 24 |
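The substantive change in this hunk is swapping printjson for assert.commandWorked: instead of merely logging the command response, the test now fails fast if a setup command returns ok: 0. A minimal sketch of the difference (hypothetical standalone shell snippet, reusing the test's own admin handle):

    // Before: a failed command is only printed and the test keeps running.
    printjson(admin.runCommand({enableSharding: "foo"}));

    // After: assert.commandWorked throws on an ok: 0 response, so a
    // setup failure stops the test at the point where it happened.
    assert.commandWorked(admin.runCommand({enableSharding: "foo"}));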
24 | | - var numChunks = 10; |
| 25 | + var numChunks = 10; |
25 | 26 |
26 | | - // Split off the low and high chunks, to get non-special-case behavior |
27 | | - printjson(admin.runCommand({split: coll + "", middle: {_id: 0}})); |
28 | | - printjson(admin.runCommand({split: coll + "", middle: {_id: numChunks + 1}})); |
| 27 | + // Split off the low and high chunks, to get non-special-case behavior |
| 28 | + assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}})); |
| 29 | + assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: numChunks + 1}})); |
29 | 30 |
30 | | - // Split all the other chunks, and an extra chunk |
31 | | - // We need the extra chunk to compensate for the fact that the chunk differ resets the highest |
32 | | - // chunk's (i.e. the last-split-chunk's) data count on reload. |
33 | | - for (var i = 1; i < numChunks + 1; i++) { |
34 | | - printjson(admin.runCommand({split: coll + "", middle: {_id: i}})); |
35 | | - } |
| 31 | + // Split all the other chunks, and an extra chunk. We need the extra chunk to compensate for |
| 32 | + // the fact that the chunk differ resets the highest chunk's (i.e. the last-split-chunk's) |
| 33 | + // data count on reload. |
| 34 | + for (var i = 1; i < numChunks + 1; i++) { |
| 35 | + assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}})); |
| 36 | + } |
36 | 37 |
37 | | - jsTest.log("Setup collection..."); |
38 | | - st.printShardingStatus(true); |
| 38 | + jsTest.log("Setup collection..."); |
| 39 | + st.printShardingStatus(true); |
39 | 40 |
40 | | - var approxSize = Object.bsonsize({_id: 0.0}); |
| 41 | + var approxSize = Object.bsonsize({_id: 0.0}); |
41 | 42 |
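Object.bsonsize here measures the full BSON document, not just the value: a 4-byte length prefix, a 1-byte type tag, the field name "_id" plus its NUL terminator (4 bytes), the 8-byte double, and a 1-byte document terminator. So approxSize should come out to 18 bytes (a back-of-the-envelope check, not one the test itself asserts):

    // 4 (length) + 1 (type) + 4 ("_id\0") + 8 (double) + 1 (terminator) = 18
    assert.eq(18, Object.bsonsize({_id: 0.0}));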
42 | | - jsTest.log("Starting inserts of approx size: " + approxSize + "..."); |
| 43 | + jsTest.log("Starting inserts of approx size: " + approxSize + "..."); |
43 | 44 |
44 | | - var chunkSizeBytes = 1024 * 1024; |
| 45 | + var chunkSizeBytes = 1024 * 1024; |
45 | 46 |
46 | | - // We insert slightly more than the max number of docs per chunk, to test |
47 | | - // if resetting the chunk size happens during reloads. If the size is |
48 | | - // reset, we'd expect to split less, since the first split would then |
49 | | - // disable further splits (statistically, since the decision is randomized). |
50 | | - // We choose 1.4 since split attempts happen about once every 1/5 chunksize, |
51 | | - // and we want to be sure we def get a split attempt at a full chunk. |
52 | | - var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize); |
53 | | - var totalInserts = insertsForSplit * numChunks; |
| 47 | + // We insert slightly more than the max number of docs per chunk, to test |
| 48 | + // if resetting the chunk size happens during reloads. If the size is |
| 49 | + // reset, we'd expect to split less, since the first split would then |
| 50 | + // disable further splits (statistically, since the decision is randomized). |
| 51 | + // We choose 1.4 since split attempts happen about once every 1/5 of the chunk
| 52 | + // size, and we want to be sure we definitely get a split attempt at a full chunk.
| 53 | + var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize); |
| 54 | + var totalInserts = insertsForSplit * numChunks; |
54 | 55 |
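Plugging in the 18-byte figure gives a feel for the scale of the test (a worked example under that assumption; the shell computes the real values from approxSize):

    // Hypothetical names, inlining chunkSizeBytes = 1024 * 1024 and approxSize = 18:
    var exampleInsertsForSplit = Math.ceil((1024 * 1024 * 1.4) / 18);  // 81556
    var exampleTotalInserts = exampleInsertsForSplit * 10;  // 815560, ~1.4 MB per chunk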
55 | | - printjson({ |
56 | | - chunkSizeBytes: chunkSizeBytes, |
57 | | - insertsForSplit: insertsForSplit, |
58 | | - totalInserts: totalInserts |
59 | | - }); |
| 56 | + printjson({ |
| 57 | + chunkSizeBytes: chunkSizeBytes, |
| 58 | + insertsForSplit: insertsForSplit, |
| 59 | + totalInserts: totalInserts |
| 60 | + }); |
60 | 61 |
61 | | - // Insert enough docs to trigger splits into all chunks |
62 | | - var bulk = coll.initializeUnorderedBulkOp(); |
63 | | - for (var i = 0; i < totalInserts; i++) { |
64 | | - bulk.insert({_id: i % numChunks + (i / totalInserts)}); |
65 | | - } |
66 | | - assert.writeOK(bulk.execute()); |
| 62 | + // Insert enough docs to trigger splits into all chunks |
| 63 | + var bulk = coll.initializeUnorderedBulkOp(); |
| 64 | + for (var i = 0; i < totalInserts; i++) { |
| 65 | + bulk.insert({_id: i % numChunks + (i / totalInserts)}); |
| 66 | + } |
| 67 | + assert.writeOK(bulk.execute()); |
67 | 68 |
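The _id expression i % numChunks + (i / totalInserts) round-robins the documents across the ten target ranges [0, 1) through [9, 10), while the fractional i / totalInserts term (always below 1) keeps every _id unique and inside its range. A small illustration of the pattern (hypothetical standalone snippet with the constants inlined):

    // i = 0  -> _id ~ 0.0000000  (chunk [0, 1))
    // i = 1  -> _id ~ 1.0000012  (chunk [1, 2))
    // i = 10 -> _id ~ 0.0000123  (back to chunk [0, 1))
    for (var i = 0; i < 11; i++) {
        print(i % 10 + (i / 815560));
    }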
68 | | - jsTest.log("Inserts completed..."); |
| 69 | + jsTest.log("Inserts completed..."); |
69 | 70 |
70 | | - st.printShardingStatus(true); |
71 | | - printjson(coll.stats()); |
| 71 | + st.printShardingStatus(true); |
| 72 | + printjson(coll.stats()); |
72 | 73 |
73 | | - // Check that all chunks (except the two extreme chunks) |
74 | | - // have been split at least once + 1 extra chunk as reload buffer |
75 | | - assert.gte(config.chunks.count(), numChunks * 2 + 3); |
| 74 | + // Check that all chunks (except the two extreme chunks) |
| 75 | + // have been split at least once + 1 extra chunk as reload buffer |
| 76 | + assert.gte(config.chunks.count(), numChunks * 2 + 3); |
76 | 77 |
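The lower bound follows from the setup arithmetic: the numChunks + 2 manual split points (at 0, at 1 through numChunks, and at numChunks + 1) turn one chunk into numChunks + 3, and the inserts should autosplit each of the numChunks insert-target chunks at least once:

    // (numChunks + 3) chunks after the manual splits, plus numChunks autosplits:
    //   (numChunks + 3) + numChunks = 2 * numChunks + 3 = 23 for numChunks = 10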
77 | | - jsTest.log("DONE!"); |
| 78 | + jsTest.log("DONE!"); |
| 79 | + |
| 80 | + } else { |
| 81 | + jsTest.log("Disabled test in debug builds."); |
| 82 | + } |
78 | 83 |
79 | | -} else { |
80 | | - jsTest.log("Disabled test in debug builds."); |
81 | | -} |
| 84 | + st.stop(); |
82 | 85 |
83 | | -st.stop(); |
| 86 | +})(); |