Skip to content

Commit e5a7faa

Browse files
author
Greg Studer
committed
SERVER-10478 fix batch limit check for _cloneLocs in migration
1 parent ec60d7d commit e5a7faa

File tree

3 files changed

+69
-1
lines changed

3 files changed

+69
-1
lines changed

bson/bsonobjbuilder.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -752,6 +752,8 @@ namespace mongo {
752752

753753
int len() const { return _b.len(); }
754754

755+
int arrSize() const { return _i; }
756+
755757
private:
756758
void fill( const StringData& name ) {
757759
char *r;
Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
//
2+
// Tests migration behavior of large documents
3+
//
4+
5+
var st = new ShardingTest({ shards : 2, mongos : 1,
6+
other : { separateConfig : true,
7+
mongosOptions : { noAutoSplit : "" },
8+
shardOptions : { /* binVersion : "latest" */ } } });
9+
10+
var mongos = st.s0;
11+
var coll = mongos.getCollection( "foo.bar" );
12+
var admin = mongos.getDB( "admin" );
13+
var shards = mongos.getCollection( "config.shards" ).find().toArray();
14+
var shardAdmin = st.shard0.getDB( "admin" );
15+
16+
mongos.getDB( "config" ).settings.update({ _id : "balancer" },
17+
{ $set : { stopped : true } }, true, false);
18+
19+
assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
20+
printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
21+
assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
22+
assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
23+
24+
jsTestLog( "Preparing large insert..." );
25+
26+
var data1MB = "x"
27+
while ( data1MB.length < 1024 * 1024 )
28+
data1MB += data1MB;
29+
30+
var data15MB = "";
31+
for ( var i = 0; i < 15; i++ ) data15MB += data1MB;
32+
33+
var data15PlusMB = data15MB;
34+
for ( var i = 0; i < 1023 * 1024; i++ ) data15PlusMB += "x";
35+
36+
print("~15MB object size is : " + Object.bsonsize({ _id : 0, d : data15PlusMB }));
37+
38+
jsTestLog( "Inserting docs of large and small sizes..." );
39+
40+
// Two large docs next to each other
41+
coll.insert({ _id : -2, d : data15PlusMB });
42+
coll.insert({ _id : -1, d : data15PlusMB });
43+
44+
// Docs of assorted sizes
45+
coll.insert({ _id : 0, d : "x" });
46+
coll.insert({ _id : 1, d : data15PlusMB });
47+
coll.insert({ _id : 2, d : "x" });
48+
coll.insert({ _id : 3, d : data15MB });
49+
coll.insert({ _id : 4, d : "x" });
50+
coll.insert({ _id : 5, d : data1MB });
51+
coll.insert({ _id : 6, d : "x" });
52+
53+
assert.eq( null, coll.getDB().getLastError() );
54+
assert.eq( 9, coll.find().itcount() );
55+
56+
jsTestLog( "Starting migration..." );
57+
58+
assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id }).ok );
59+
assert( admin.runCommand({ moveChunk : coll + "", find : { _id : -1 }, to : shards[1]._id }).ok );
60+
61+
assert.eq( 9, coll.find().itcount() );
62+
63+
jsTestLog( "DONE!" );
64+
65+
st.stop();

s/d_migrate.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -507,7 +507,8 @@ namespace mongo {
507507

508508
// use the builder size instead of accumulating 'o's size so that we take into consideration
509509
// the overhead of BSONArray indices
510-
if ( a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
510+
if ( a.arrSize() != 0 &&
511+
a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
511512
filledBuffer = true; // break out of outer while loop
512513
break;
513514
}

0 commit comments

Comments (0)