@@ -32,21 +32,24 @@ def test_run_benchmark(
     tmp_path: Path,
     track_metric,
     upload_assets_on_fail,
-    request
+    request,
 ):
     track_metric("scenario_id", scenario.id)
     # Check if a backend override has been provided via cli options.
     override_backend = request.config.getoption("--override-backend")
     backend_filter = request.config.getoption("--backend-filter")
     if backend_filter and not re.match(backend_filter, scenario.backend):
-        #TODO apply filter during scenario retrieval, but seems to be hard to retrieve cli param
-        pytest.skip(f"skipping scenario {scenario.id} because backend {scenario.backend} does not match filter {backend_filter!r}")
+        # TODO apply filter during scenario retrieval, but seems to be hard to retrieve cli param
+        pytest.skip(
+            f"skipping scenario {scenario.id} because backend {scenario.backend} does not match filter {backend_filter!r}"
+        )
     backend = scenario.backend
     if override_backend:
         _log.info(f"Overriding backend URL with {override_backend!r}")
         backend = override_backend

     connection: openeo.Connection = connection_factory(url=backend)
+    track_metric("milestone", "connected", update=True)

     # TODO #14 scenario option to use synchronous instead of batch job mode?
     job = connection.create_job(
@@ -55,26 +58,34 @@ def test_run_benchmark(
         additional=scenario.job_options,
     )
     track_metric("job_id", job.job_id)
+    track_metric("milestone", "job created", update=True)

     # TODO: monitor timing and progress
     # TODO: abort excessively long batch jobs? https://github.com/Open-EO/openeo-python-client/issues/589
     job.start_and_wait()
+    # TODO: "job started" milestone
+    track_metric("milestone", "job finished", update=True)

     collect_metrics_from_job_metadata(job, track_metric=track_metric)
+    track_metric("milestone", "job metadata obtained", update=True)

     results = job.get_results()
+    track_metric("milestone", "job results metadata obtained", update=True)
     collect_metrics_from_results_metadata(results, track_metric=track_metric)

     # Download actual results
     actual_dir = tmp_path / "actual"
     paths = results.download_files(target=actual_dir, include_stac_metadata=True)
+    track_metric("milestone", "job results downloaded", update=True)
+
     # Upload assets on failure
     upload_assets_on_fail(*paths)

     # Compare actual results with reference data
     reference_dir = download_reference_data(
         scenario=scenario, reference_dir=tmp_path / "reference"
     )
+    track_metric("milestone", "benchmark reference data downloaded", update=True)

     assert_job_results_allclose(
         actual=actual_dir,
@@ -83,3 +94,4 @@ def test_run_benchmark(
         rtol=scenario.reference_options.get("rtol", 1e-6),
         atol=scenario.reference_options.get("atol", 1e-6),
     )
+    track_metric("milestone", "compared actuals with reference data", update=True)