Skip to content

Commit b00660d

Browse files
committed
formatted via isort
1 parent 7c29c68 commit b00660d

File tree

2 files changed

+8
-6
lines changed

2 files changed

+8
-6
lines changed

src/google/adk/evaluation/local_eval_service.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -226,13 +226,15 @@ async def _evaluate_single_inference_result(
226226
else 'test_user_id'
227227
)
228228

229-
if inference_result.status == InferenceStatus.FAILURE or \
230-
inference_result.inferences is None:
229+
if (
230+
inference_result.status == InferenceStatus.FAILURE
231+
or inference_result.inferences is None
232+
):
231233
logger.error(
232234
'Evaluation attempted on failed inference for eval case `%s`.'
233235
' Error: %s',
234236
inference_result.eval_case_id,
235-
inference_result.error_message
237+
inference_result.error_message,
236238
)
237239
eval_case_result = await self._build_not_evaluated_eval_case_result(
238240
inference_result=inference_result,

tests/unittests/evaluation/test_local_eval_service.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -372,17 +372,17 @@ async def test_evaluate_skips_failed_inference_results(
372372
assert len(results) == 3
373373
results_by_case = {result.eval_id: result for result in results}
374374

375-
failure_result = results_by_case['case_failure']
375+
failure_result = results_by_case["case_failure"]
376376
assert failure_result.final_eval_status == EvalStatus.NOT_EVALUATED
377377
assert failure_result.overall_eval_metric_results == []
378378
assert failure_result.eval_metric_result_per_invocation == []
379379

380-
for case_id in ['case_success', 'case_unknown']:
380+
for case_id in ["case_success", "case_unknown"]:
381381
case_result = results_by_case[case_id]
382382
assert case_result.final_eval_status == EvalStatus.PASSED
383383
assert len(case_result.overall_eval_metric_results) == 1
384384
assert (
385-
case_result.overall_eval_metric_results[0].metric_name == 'fake_metric'
385+
case_result.overall_eval_metric_results[0].metric_name == "fake_metric"
386386
)
387387
assert case_result.overall_eval_metric_results[0].score == 0.9
388388

0 commit comments

Comments (0)