
Display 5 worst predictions at the end of evaluation

Merged: Manon Blanco requested to merge display-worst-predictions into main
All threads resolved!
2 files changed: +48 -41
@@ -94,7 +94,7 @@ def print_worst_predictions(all_inferences: Dict[str, List[Inference]]):
             inference.prediction,
         )
         alignment_str = f'{alignment["query_aligned"]}\n{alignment["matched_aligned"]}\n{alignment["target_aligned"]}'
-        table.add_row([inference.image, inference.wer, alignment_str])
+        table.add_row([inference.image, round(inference.wer * 100, 2), alignment_str])
     print(f"\n#### {NB_WORST_PREDICTIONS} worst prediction(s)\n")
     print(table)
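
The first hunk only changes how the WER is rendered in the PrettyTable row: the raw ratio is multiplied by 100 and rounded to two decimals. A minimal standalone sketch of that effect (the image name, WER value, alignment string and column titles below are made up for illustration, not the project's actual code):

from prettytable import PrettyTable

# Illustrative stand-ins for one Inference entry (not real project data).
image = "page_042.jpg"
wer = 0.2371
alignment_str = "le chat dort\nle chat d-rt\nle chat dort"

table = PrettyTable(field_names=["Image", "WER (%)", "Alignment"])
# Old behaviour showed the raw ratio (0.2371);
# the new row shows a rounded percentage (23.71).
table.add_row([image, round(wer * 100, 2), alignment_str])
print(table)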
@@ -126,7 +126,14 @@ def eval_nerval(
         if not (ground_truths and predictions):
             continue
-        scores = evaluate(ground_truths, predictions, threshold)
+        scores = {
+            key: {
+                k: round(value * 100, 2) if k in ["P", "R", "F1"] else value
+                for k, value in values.items()
+            }
+            for key, values in evaluate(ground_truths, predictions, threshold).items()
+        }
         print(f"\n##### {split_name}\n")
         print_results(scores)
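
The second hunk wraps the existing nerval evaluate() call in a nested dict comprehension so that precision, recall and F1 are reported as percentages rounded to two decimals, while any other fields pass through unchanged. A self-contained sketch of that transformation, using a hand-written score dict in place of a real evaluate() result (the entity names and the "Support" key are assumptions for illustration):

# Hypothetical nerval-style scores: ratios for P/R/F1, raw counts otherwise.
raw_scores = {
    "PER": {"P": 0.8333, "R": 0.7143, "F1": 0.7692, "Support": 7},
    "All": {"P": 0.9, "R": 0.75, "F1": 0.8182, "Support": 12},
}

scores = {
    key: {
        k: round(value * 100, 2) if k in ["P", "R", "F1"] else value
        for k, value in values.items()
    }
    for key, values in raw_scores.items()
}

print(scores["PER"])  # {'P': 83.33, 'R': 71.43, 'F1': 76.92, 'Support': 7}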