@@ -80,9 +80,9 @@ public static void init() throws Exception {
         dir = newDirectory();
         IndexWriterConfig iwc = new IndexWriterConfig();
         RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-        // the upper bound is higher than 2048 so that in some cases we time out after the first batch of bulk scoring, but before
+        // the upper bound is higher than 4096 so that in some cases we time out after the first batch of bulk scoring, but before
         // getting to the end of the first segment
-        numDocs = scaledRandomIntBetween(500, 2500);
+        numDocs = scaledRandomIntBetween(500, 4500);
         for (int i = 0; i < numDocs; ++i) {
             Document doc = new Document();
             doc.add(new StringField("field", Integer.toString(i), Field.Store.NO));
@@ -309,9 +309,9 @@ public void testBulkScorerTimeout() throws IOException {
         QueryPhase.executeQuery(context);
         assertTrue(context.queryResult().searchTimedOut());
         int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc();
-        // See CancellableBulkScorer#INITIAL_INTERVAL for the source of 2048: we always score the first
-        // batch of up to 2048 docs, and only then raise the timeout
-        assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value());
+        // See CancellableBulkScorer#INITIAL_INTERVAL for the source of 4096: we always score the first
+        // batch of up to 4096 docs, and only then raise the timeout
+        assertEquals(Math.min(4096, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value());
         assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length);
     }
 }
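
For context, here is a minimal sketch of the interval-growing strategy the test comments describe: score documents in windows, run the cancellation/timeout check only between windows, and grow the window size so the check overhead amortizes away. This is not the actual Elasticsearch class; the wrapper name, the `MAX_INTERVAL` cap, the growth factor, and the exact placement of the cancellation check are assumptions. Only `CancellableBulkScorer#INITIAL_INTERVAL` being 4096 comes from the diff above.

```java
import java.io.IOException;
import java.util.Objects;

import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.util.Bits;

// Hypothetical wrapper illustrating the batching behavior the test asserts.
final class IntervalBulkScorer extends BulkScorer {

    // The value the updated test expects: per the comments above, the first
    // window of up to this many docs is always scored before a timeout fires.
    static final int INITIAL_INTERVAL = 4096;

    // Assumed cap on the window size; not taken from the diff.
    private static final int MAX_INTERVAL = 1 << 20;

    private final BulkScorer in;
    private final Runnable checkCancelled; // throws when the search timed out

    IntervalBulkScorer(BulkScorer in, Runnable checkCancelled) {
        this.in = Objects.requireNonNull(in);
        this.checkCancelled = Objects.requireNonNull(checkCancelled);
    }

    @Override
    public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
        int interval = INITIAL_INTERVAL;
        while (min < max) {
            // Score one window of docs [min, windowMax); the inner scorer
            // returns the next doc to score, which becomes the new min.
            final int windowMax = (int) Math.min((long) min + interval, max);
            min = in.score(collector, acceptDocs, min, windowMax);
            // Check for cancellation only between windows, so the first
            // INITIAL_INTERVAL docs are always collected -- matching the
            // Math.min(4096, firstSegmentMaxDoc) assertion in the test.
            checkCancelled.run();
            // Grow the window geometrically to amortize the check overhead.
            interval = Math.min(interval << 1, MAX_INTERVAL);
        }
        return min;
    }

    @Override
    public long cost() {
        return in.cost();
    }
}
```

Under these assumptions, wrapping each segment's `BulkScorer` in such a class gives the shape the test checks for: a search that times out on the first segment still returns the first window of hits, rather than nothing, and the `numDocs` upper bound of 4500 ensures some runs time out after that first window but before the segment is exhausted.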