File tree Expand file tree Collapse file tree 2 files changed +8
-6
lines changed
src/google/adk/evaluation
tests/unittests/evaluation Expand file tree Collapse file tree 2 files changed +8
-6
lines changed

Original file line number | Diff line number | Diff line change
@@ -227,13 +227,15 @@ async def _evaluate_single_inference_result(
227 227          else 'test_user_id'
228 228      )
229 229
230     -    if inference_result.status == InferenceStatus.FAILURE or \
231     -        inference_result.inferences is None:
    230 +    if (
    231 +        inference_result.status == InferenceStatus.FAILURE
    232 +        or inference_result.inferences is None
    233 +    ):
232 234      logger.error(
233 235          'Evaluation attempted on failed inference for eval case `%s`.'
234 236          ' Error: %s',
235 237          inference_result.eval_case_id,
236     -        inference_result.error_message
    238 +        inference_result.error_message,
237 239      )
238 240      eval_case_result = await self._build_not_evaluated_eval_case_result(
239 241          inference_result=inference_result,
Original file line number | Diff line number | Diff line change
@@ -372,17 +372,17 @@ async def test_evaluate_skips_failed_inference_results(
372 372      assert len(results) == 3
373 373      results_by_case = {result.eval_id: result for result in results}
374 374
375     -    failure_result = results_by_case['case_failure']
    375 +    failure_result = results_by_case["case_failure"]
376 376      assert failure_result.final_eval_status == EvalStatus.NOT_EVALUATED
377 377      assert failure_result.overall_eval_metric_results == []
378 378      assert failure_result.eval_metric_result_per_invocation == []
379 379
380     -    for case_id in ['case_success', 'case_unknown']:
    380 +    for case_id in ["case_success", "case_unknown"]:
381 381          case_result = results_by_case[case_id]
382 382          assert case_result.final_eval_status == EvalStatus.PASSED
383 383          assert len(case_result.overall_eval_metric_results) == 1
384 384          assert (
385     -            case_result.overall_eval_metric_results[0].metric_name == 'fake_metric'
    385 +            case_result.overall_eval_metric_results[0].metric_name == "fake_metric"
386 386          )
387 387          assert case_result.overall_eval_metric_results[0].score == 0.9
You can’t perform that action at this time.
0 commit comments