
Commit f10740b (parent: 3a77b73)

Check if extra metrics have type EvaluationMetric (mlflow#10083)

Signed-off-by: Ann Zhang <[email protected]>
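This commit makes the default evaluator validate `extra_metrics` up front: every entry passed to `mlflow.evaluate(..., extra_metrics=[...])` must be an instance of `mlflow.metrics.EvaluationMetric`, and all offending entries are collected and reported together in a single `MlflowException`. For orientation, here is a minimal sketch of a valid extra metric; the `eval_fn` body and the metric name are illustrative assumptions, not part of this commit:

```python
import mlflow
from mlflow.metrics import MetricValue, make_metric


def word_count_eval(predictions, targets=None, metrics=None):
    # Illustrative scoring function (assumption): score each prediction
    # by its word count and report the mean as an aggregate.
    scores = [len(str(p).split()) for p in predictions]
    return MetricValue(
        scores=scores,
        aggregate_results={"mean": sum(scores) / len(scores)},
    )


# make_metric wraps eval_fn into an EvaluationMetric -- the exact type
# that the isinstance check added in this commit requires.
word_count = make_metric(
    eval_fn=word_count_eval,
    greater_is_better=True,
    name="word_count",
)
assert isinstance(word_count, mlflow.metrics.EvaluationMetric)
```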

2 files changed: +36 lines, -0 lines

mlflow/models/evaluation/default_evaluator.py (16 additions, 0 deletions)

@@ -25,6 +25,7 @@
 from mlflow.entities.metric import Metric
 from mlflow.exceptions import MlflowException
 from mlflow.metrics import (
+    EvaluationMetric,
     MetricValue,
     ari_grade_level,
     exact_match,
@@ -1744,6 +1745,21 @@ def evaluate(
         if self.extra_metrics is None:
             self.extra_metrics = []

+        bad_metrics = []
+        for metric in self.extra_metrics:
+            if not isinstance(metric, EvaluationMetric):
+                bad_metrics.append(metric)
+        if len(bad_metrics) > 0:
+            message = "\n".join(
+                [f"- Metric '{m}' has type '{type(m).__name__}'" for m in bad_metrics]
+            )
+            raise MlflowException(
+                f"In the 'extra_metrics' parameter, the following metrics have the wrong type:\n"
+                f"{message}\n"
+                f"Please ensure that all extra metrics are instances of "
+                f"mlflow.metrics.EvaluationMetric."
+            )
+
         if self.model_type in (_ModelType.CLASSIFIER, _ModelType.REGRESSOR):
             inferred_model_type = _infer_model_type_by_labels(self.y)
             if inferred_model_type is not None and model_type != inferred_model_type:
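To make the distinction concrete: the built-in builders in `mlflow.metrics` are factory functions, so only their return values pass this check. A small sketch of the difference (assuming an MLflow version where `mlflow.metrics.latency` is available, as in the test below):

```python
import mlflow
from mlflow.metrics import EvaluationMetric

# Calling the builder returns an EvaluationMetric instance -- accepted.
print(isinstance(mlflow.metrics.latency(), EvaluationMetric))  # True

# The builder itself is a plain function -- now rejected with an explicit error.
print(isinstance(mlflow.metrics.latency, EvaluationMetric))  # False
```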

tests/evaluate/test_default_evaluator.py (20 additions, 0 deletions)

@@ -3011,6 +3011,26 @@ def test_multi_output_model_error_handling():
     )


+def test_invalid_extra_metrics():
+    with mlflow.start_run():
+        model_info = mlflow.pyfunc.log_model(
+            artifact_path="model", python_model=language_model, input_example=["a", "b"]
+        )
+        data = pd.DataFrame({"text": ["Hello world", "My name is MLflow"]})
+        with pytest.raises(
+            MlflowException,
+            match="Please ensure that all extra metrics are instances of "
+            "mlflow.metrics.EvaluationMetric.",
+        ):
+            mlflow.evaluate(
+                model_info.model_uri,
+                data,
+                model_type="text",
+                evaluators="default",
+                extra_metrics=[mlflow.metrics.latency],
+            )
+
+
 def test_evaluate_with_latency():
     with mlflow.start_run() as run:
         model_info = mlflow.pyfunc.log_model(
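The test passes `mlflow.metrics.latency` itself, i.e. the uncalled builder, which is exactly the mistake the new check is meant to surface. The corrected form of that call, a sketch under the same fixture assumptions as the test, would pass the builder's return value instead:

```python
# Sketch: the corrected call passes an EvaluationMetric instance
# by invoking the latency builder.
mlflow.evaluate(
    model_info.model_uri,
    data,
    model_type="text",
    evaluators="default",
    extra_metrics=[mlflow.metrics.latency()],
)
```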
