From c844e0240956215180dffdc7a1f457c83f119959 Mon Sep 17 00:00:00 2001
From: EvaBardou <bardou@teklia.com>
Date: Wed, 3 Jan 2024 17:11:34 +0100
Subject: [PATCH] Utils method to print a Markdown table

---
 nerval/evaluate.py | 22 +++++++---------------
 nerval/utils.py    | 45 +++++++++++++++++++++++----------------------
 2 files changed, 30 insertions(+), 37 deletions(-)

diff --git a/nerval/evaluate.py b/nerval/evaluate.py
index d25903f..62e6b88 100644
--- a/nerval/evaluate.py
+++ b/nerval/evaluate.py
@@ -5,7 +5,6 @@ from typing import List
 
 import editdistance
 import edlib
-from prettytable import MARKDOWN, PrettyTable
 
 from nerval.parse import (
     BEGINNING_POS,
@@ -15,7 +14,7 @@ from nerval.parse import (
     look_for_further_entity_part,
     parse_bio,
 )
-from nerval.utils import print_result_compact, print_results
+from nerval.utils import print_markdown_table, print_result_compact, print_results
 
 logger = logging.getLogger(__name__)
 
@@ -391,16 +390,9 @@ def run_multiple(file_csv: Path, folder: Path, threshold: int, verbose: bool):
         raise Exception("No file were counted")
 
     logger.info("Average score on all corpus")
-    table = PrettyTable()
-    table.field_names = ["Precision", "Recall", "F1"]
-    table.set_style(MARKDOWN)
-    table.add_rows(
-        [
-            [
-                round(precision / count, 3),
-                round(recall / count, 3),
-                round(f1 / count, 3),
-            ],
-        ],
-    )
-    print(table)
+    result = [
+        round(precision / count, 3),
+        round(recall / count, 3),
+        round(f1 / count, 3),
+    ]
+    print_markdown_table(["Precision", "Recall", "F1"], [result])
diff --git a/nerval/utils.py b/nerval/utils.py
index 01d4f83..253c14d 100644
--- a/nerval/utils.py
+++ b/nerval/utils.py
@@ -1,12 +1,19 @@
 from prettytable import MARKDOWN, PrettyTable
 
 
+def print_markdown_table(header: list[str], rows: list[list]):
+    table = PrettyTable()
+    table.field_names = header
+    table.set_style(MARKDOWN)
+    table.add_rows(rows)
+    print(table)
+
+
 def print_results(scores: dict):
     """Display final results.
 
     None values are kept to indicate the absence of a certain tag in either annotation or prediction.
     """
-    header = ["tag", "predicted", "matched", "Precision", "Recall", "F1", "Support"]
     results = []
     for tag in sorted(scores, reverse=True):
         prec = None if scores[tag]["P"] is None else round(scores[tag]["P"], 3)
@@ -25,29 +32,23 @@ def print_results(scores: dict):
             ],
         )
 
-    table = PrettyTable()
-    table.field_names = header
-    table.set_style(MARKDOWN)
-    table.add_rows(results)
-    print(table)
+    print_markdown_table(
+        ["tag", "predicted", "matched", "Precision", "Recall", "F1", "Support"],
+        results,
+    )
 
 
 def print_result_compact(scores: dict):
-    header = ["tag", "predicted", "matched", "Precision", "Recall", "F1", "Support"]
     result = [
-        [
-            "All",
-            scores["All"]["predicted"],
-            scores["All"]["matched"],
-            round(scores["All"]["P"], 3),
-            round(scores["All"]["R"], 3),
-            round(scores["All"]["F1"], 3),
-            scores["All"]["Support"],
-        ],
+        "All",
+        scores["All"]["predicted"],
+        scores["All"]["matched"],
+        round(scores["All"]["P"], 3),
+        round(scores["All"]["R"], 3),
+        round(scores["All"]["F1"], 3),
+        scores["All"]["Support"],
     ]
-
-    table = PrettyTable()
-    table.field_names = header
-    table.set_style(MARKDOWN)
-    table.add_rows(result)
-    print(table)
+    print_markdown_table(
+        ["tag", "predicted", "matched", "Precision", "Recall", "F1", "Support"],
+        [result],
+    )
-- 
GitLab