Fix and unmute SemanticInferenceMetadataFieldsRecoveryTests #126784

Merged · 4 commits · Apr 15, 2025
Changes from all commits
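
In short, this PR removes the three muted-tests.yml entries for SemanticInferenceMetadataFieldsRecoveryTests.testSnapshotRecovery (issues #124383, #124384, #124385), switches the recovery test to comparing recovered operations with TranslogOperationAsserter instead of comparing raw _source bytes, and tweaks the random embedding generators so they never produce zero-magnitude vectors.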

muted-tests.yml (0 additions, 9 deletions)
@@ -240,15 +240,6 @@ tests:
 - class: org.elasticsearch.smoketest.MlWithSecurityIT
   method: test {yaml=ml/3rd_party_deployment/Test start and stop multiple deployments}
   issue: https://github.com/elastic/elasticsearch/issues/124315
-- class: org.elasticsearch.xpack.inference.mapper.SemanticInferenceMetadataFieldsRecoveryTests
-  method: testSnapshotRecovery {p0=false p1=true}
-  issue: https://github.com/elastic/elasticsearch/issues/124383
-- class: org.elasticsearch.xpack.inference.mapper.SemanticInferenceMetadataFieldsRecoveryTests
-  method: testSnapshotRecovery {p0=true p1=false}
-  issue: https://github.com/elastic/elasticsearch/issues/124384
-- class: org.elasticsearch.xpack.inference.mapper.SemanticInferenceMetadataFieldsRecoveryTests
-  method: testSnapshotRecovery {p0=false p1=false}
-  issue: https://github.com/elastic/elasticsearch/issues/124385
 - class: org.elasticsearch.env.NodeEnvironmentTests
   method: testIndexCompatibilityChecks
   issue: https://github.com/elastic/elasticsearch/issues/124388
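
(For context: muted-tests.yml is the list of tests the Elasticsearch build skips in CI, so deleting these entries is what actually un-mutes the three testSnapshotRecovery parameterizations.)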

@@ -1400,7 +1400,8 @@ public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine e
         assertThat(luceneOp.toString(), luceneOp.primaryTerm(), equalTo(translogOp.primaryTerm()));
         assertThat(luceneOp.opType(), equalTo(translogOp.opType()));
         if (luceneOp.opType() == Translog.Operation.Type.INDEX) {
-            if (engine.engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled()) {
+            if (engine.engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled()
+                || engine.engineConfig.getMapperService().mappingLookup().inferenceFields().isEmpty() == false) {
                 assertTrue(
                     "luceneOp=" + luceneOp + " != translogOp=" + translogOp,
                     translogOperationAsserter.assertSameIndexOperation((Translog.Index) luceneOp, (Translog.Index) translogOp)
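
For context, the broadened condition above matters because an index with inference fields (for example semantic_text) can store a _source in Lucene that differs byte-for-byte from the translog copy even when the two documents carry the same content, so the asserter has to compare operations semantically rather than textually. Below is a minimal, hypothetical sketch of that idea; the helper name and the simple map comparison are assumptions, and the real TranslogOperationAsserter does more (including handling re-synthesized sources):

import java.util.Map;

import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentHelper;

public class SourceEquivalenceSketch {
    // Hypothetical helper: true when two JSON payloads carry the same content,
    // regardless of key order or byte-level encoding differences.
    static boolean sameSource(BytesReference a, BytesReference b) {
        Map<String, Object> left = XContentHelper.convertToMap(a, false).v2();
        Map<String, Object> right = XContentHelper.convertToMap(b, false).v2();
        return left.equals(right);
    }

    public static void main(String[] args) {
        var luceneSource = new BytesArray("{\"b\":2,\"a\":1}");
        var translogSource = new BytesArray("{\"a\":1,\"b\":2}");
        System.out.println(sameSource(luceneSource, translogSource)); // prints true
    }
}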

@@ -21,6 +21,7 @@
 import org.elasticsearch.index.engine.LuceneChangesSnapshot;
 import org.elasticsearch.index.engine.LuceneSyntheticSourceChangesSnapshot;
 import org.elasticsearch.index.engine.SearchBasedChangesSnapshot;
+import org.elasticsearch.index.engine.TranslogOperationAsserter;
 import org.elasticsearch.index.mapper.InferenceMetadataFieldsMapper;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.SourceFieldMapper;

@@ -42,7 +43,6 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
 import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.generateRandomChunkingSettings;
 import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.randomChunkedInferenceEmbeddingByte;
 import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.randomChunkedInferenceEmbeddingFloat;

@@ -137,7 +137,7 @@ protected String defaultMapping() {
     }
 
     public void testSnapshotRecovery() throws IOException {
-        List<Engine.Index> expectedOperations = new ArrayList<>();
+        List<Translog.Index> expectedOperations = new ArrayList<>();
         int size = randomIntBetween(10, 50);
         for (int i = 0; i < size; i++) {
             var source = randomSource();

@@ -159,8 +159,19 @@
                 }
             }
             var op = indexForDoc(doc);
-            expectedOperations.add(op);
-            engine.index(op);
+            var result = engine.index(op);
+            expectedOperations.add(
+                new Translog.Index(
+                    result.getId(),
+                    result.getSeqNo(),
+                    result.getTerm(),
+                    result.getVersion(),
+                    op.source(),
+                    op.routing(),
+                    op.getAutoGeneratedIdTimestamp()
+                )
+            );
+
             if (frequently()) {
                 engine.flush();
             }

@@ -181,13 +192,12 @@
                 IndexVersion.current()
             )
         ) {
+            var asserter = TranslogOperationAsserter.withEngineConfig(engine.config());
             for (int i = 0; i < size; i++) {
                 var op = snapshot.next();
                 assertThat(op.opType(), equalTo(Translog.Operation.Type.INDEX));
                 Translog.Index indexOp = (Translog.Index) op;
-                assertThat(indexOp.id(), equalTo(expectedOperations.get(i).id()));
-                assertThat(indexOp.routing(), equalTo(expectedOperations.get(i).routing()));
-                assertToXContentEquivalent(indexOp.source(), expectedOperations.get(i).source(), XContentType.JSON);
+                assertTrue(asserter.assertSameIndexOperation(indexOp, expectedOperations.get(i)));
             }
             assertNull(snapshot.next());
         }
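
One detail worth noting in the test change above: sequence number, primary term, and version are assigned by the engine during indexing, so the expected Translog.Index must be built from the Engine.IndexResult rather than from the original Engine.Index request. A toy, self-contained illustration of this record-then-compare pattern (plain Java with hypothetical stand-in types, not Elasticsearch code):

// Toy stand-ins for the engine types; only the assignment flow is illustrated.
record IndexRequest(String id, String source) {}

record IndexResult(String id, long seqNo, long term, long version) {}

record ExpectedOp(String id, long seqNo, long term, long version, String source) {}

class ToyEngine {
    private long nextSeqNo = 0;

    // The engine, not the caller, decides seqNo/term/version.
    IndexResult index(IndexRequest request) {
        return new IndexResult(request.id(), nextSeqNo++, 1L, 1L);
    }

    public static void main(String[] args) {
        var engine = new ToyEngine();
        var request = new IndexRequest("doc-0", "{\"field\":\"value\"}");
        var result = engine.index(request);
        // Capture the expected operation from the result, as the test now does.
        var expected = new ExpectedOp(result.id(), result.seqNo(), result.term(), result.version(), request.source());
        System.out.println(expected); // ExpectedOp[id=doc-0, seqNo=0, term=1, version=1, source={"field":"value"}]
    }
}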

@@ -194,7 +194,8 @@ public static ChunkedInferenceEmbedding randomChunkedInferenceEmbeddingByte(Mode
         for (String input : inputs) {
             byte[] values = new byte[embeddingLength];
             for (int j = 0; j < values.length; j++) {
-                values[j] = randomByte();
+                // to avoid vectors with zero magnitude
+                values[j] = (byte) Math.max(1, randomByte());
             }
             chunks.add(
                 new EmbeddingResults.Chunk(

@@ -215,7 +216,8 @@ public static ChunkedInferenceEmbedding randomChunkedInferenceEmbeddingFloat(Mod
         for (String input : inputs) {
             float[] values = new float[embeddingLength];
             for (int j = 0; j < values.length; j++) {
-                values[j] = randomFloat();
+                // to avoid vectors with zero magnitude
+                values[j] = Math.max(1e-6f, randomFloat());
             }
             chunks.add(
                 new EmbeddingResults.Chunk(

@@ -236,7 +238,7 @@ public static ChunkedInferenceEmbedding randomChunkedInferenceEmbeddingSparse(Li
         for (String input : inputs) {
             var tokens = new ArrayList<WeightedToken>();
             for (var token : input.split("\\s+")) {
-                tokens.add(new WeightedToken(token, withFloats ? randomFloat() : randomIntBetween(1, 255)));
+                tokens.add(new WeightedToken(token, withFloats ? Math.max(Float.MIN_NORMAL, randomFloat()) : randomIntBetween(1, 255)));
             }
             chunks.add(
                 new EmbeddingResults.Chunk(
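
The two "to avoid vectors with zero magnitude" comments above exist because similarity functions such as cosine divide by the product of the vectors' norms, and Elasticsearch rejects zero-magnitude vectors when cosine similarity is in play. A small self-contained sketch of the arithmetic failure mode (plain Java, not Elasticsearch code):

public class CosineSketch {
    // Cosine similarity: dot(a, b) / (|a| * |b|); undefined when either norm is 0.
    static double cosine(float[] a, float[] b) {
        double dot = 0, normA = 0, normB = 0;
        for (int i = 0; i < a.length; i++) {
            dot += a[i] * b[i];
            normA += a[i] * a[i];
            normB += b[i] * b[i];
        }
        return dot / (Math.sqrt(normA) * Math.sqrt(normB));
    }

    public static void main(String[] args) {
        System.out.println(cosine(new float[] { 1f, 2f }, new float[] { 1f, 2f })); // ~1.0
        System.out.println(cosine(new float[] { 0f, 0f }, new float[] { 1f, 2f })); // NaN: 0 / 0
    }
}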