Skip to content
Snippets Groups Projects

Evaluate predictions with nerval

Merged Manon Blanco requested to merge nerval-evaluate into main
All threads resolved!
Files
3
@@ -26,8 +26,8 @@ METRICS_KEYWORD = {"cer": "chars", "wer": "words", "ner": "tokens"}
@dataclass
class Inference:
-    ground_truth: List[str]
-    prediction: List[str]
+    ground_truth: str
+    prediction: str
class MetricManager:
@@ -47,9 +47,6 @@ class MetricManager:
self.metric_names: List[str] = metric_names
self.epoch_metrics = defaultdict(list)
-    # List of inferences (prediction with their ground truth)
-    self.inferences = []
def format_string_for_cer(self, text: str, remove_token: bool = False):
"""
Format string for CER computation: remove layout tokens and extra spaces
@@ -165,7 +162,6 @@ class MetricManager:
metrics["time"] = [values["time"]]
gt, prediction = values["str_y"], values["str_x"]
-        self.inferences.append(Inference(ground_truth=gt, prediction=prediction))
for metric_name in metric_names:
match metric_name:
Loading