From 8398b7c8dec6478a68b827d42dd3b8de94419e0d Mon Sep 17 00:00:00 2001
From: Eva Bardou <bardou@teklia.com>
Date: Wed, 3 Jan 2024 17:45:46 +0000
Subject: [PATCH] Add utils method to print a Markdown table

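Add a print_markdown_table helper in nerval/utils.py so that
print_results, print_result_compact and run_multiple share the same
PrettyTable setup (Markdown style, field names, rows) instead of each
rebuilding it.

A minimal usage sketch of the new helper; the metric values are made up
for illustration and are not real evaluation output:

    from nerval.utils import print_markdown_table

    # One header row plus a single row of rounded scores,
    # mirroring what run_multiple now passes.
    print_markdown_table(
        ["Precision", "Recall", "F1"],
        [[0.912, 0.887, 0.899]],
    )
    # Prints a Markdown-formatted table to stdout.
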
---
 nerval/evaluate.py | 22 +++++++-------------
 nerval/utils.py    | 50 ++++++++++++++++++++++++----------------------
 2 files changed, 33 insertions(+), 39 deletions(-)

diff --git a/nerval/evaluate.py b/nerval/evaluate.py
index d25903f..e05d6a1 100644
--- a/nerval/evaluate.py
+++ b/nerval/evaluate.py
@@ -5,7 +5,6 @@ from typing import List
 
 import editdistance
 import edlib
-from prettytable import MARKDOWN, PrettyTable
 
 from nerval.parse import (
     BEGINNING_POS,
@@ -15,7 +14,7 @@ from nerval.parse import (
     look_for_further_entity_part,
     parse_bio,
 )
-from nerval.utils import print_result_compact, print_results
+from nerval.utils import print_markdown_table, print_result_compact, print_results
 
 logger = logging.getLogger(__name__)
 
@@ -391,16 +390,9 @@ def run_multiple(file_csv: Path, folder: Path, threshold: int, verbose: bool):
         raise Exception("No file were counted")
 
     logger.info("Average score on all corpus")
-    table = PrettyTable()
-    table.field_names = ["Precision", "Recall", "F1"]
-    table.set_style(MARKDOWN)
-    table.add_rows(
-        [
-            [
-                round(precision / count, 3),
-                round(recall / count, 3),
-                round(f1 / count, 3),
-            ],
-        ],
-    )
-    print(table)
+    result = [
+        round(precision / count, 3),
+        round(recall / count, 3),
+        round(f1 / count, 3),
+    ]
+    print_markdown_table(["Precision", "Recall", "F1"], [result])
diff --git a/nerval/utils.py b/nerval/utils.py
index 01d4f83..65aefd7 100644
--- a/nerval/utils.py
+++ b/nerval/utils.py
@@ -1,12 +1,20 @@
 from prettytable import MARKDOWN, PrettyTable
 
 
-def print_results(scores: dict):
+def print_markdown_table(header: list[str], rows: list[list]) -> None:
+    """Prints a Markdown table filled with the provided header and rows."""
+    table = PrettyTable()
+    table.field_names = header
+    table.set_style(MARKDOWN)
+    table.add_rows(rows)
+    print(table)
+
+
+def print_results(scores: dict) -> None:
     """Display final results.
 
     None values are kept to indicate the absence of a certain tag in either annotation or prediction.
     """
-    header = ["tag", "predicted", "matched", "Precision", "Recall", "F1", "Support"]
     results = []
     for tag in sorted(scores, reverse=True):
         prec = None if scores[tag]["P"] is None else round(scores[tag]["P"], 3)
@@ -25,29 +33,23 @@ def print_results(scores: dict):
             ],
         )
 
-    table = PrettyTable()
-    table.field_names = header
-    table.set_style(MARKDOWN)
-    table.add_rows(results)
-    print(table)
+    print_markdown_table(
+        ["tag", "predicted", "matched", "Precision", "Recall", "F1", "Support"],
+        results,
+    )
 
 
-def print_result_compact(scores: dict):
-    header = ["tag", "predicted", "matched", "Precision", "Recall", "F1", "Support"]
+def print_result_compact(scores: dict) -> None:
     result = [
-        [
-            "All",
-            scores["All"]["predicted"],
-            scores["All"]["matched"],
-            round(scores["All"]["P"], 3),
-            round(scores["All"]["R"], 3),
-            round(scores["All"]["F1"], 3),
-            scores["All"]["Support"],
-        ],
+        "All",
+        scores["All"]["predicted"],
+        scores["All"]["matched"],
+        round(scores["All"]["P"], 3),
+        round(scores["All"]["R"], 3),
+        round(scores["All"]["F1"], 3),
+        scores["All"]["Support"],
     ]
-
-    table = PrettyTable()
-    table.field_names = header
-    table.set_style(MARKDOWN)
-    table.add_rows(result)
-    print(table)
+    print_markdown_table(
+        ["tag", "predicted", "matched", "Precision", "Recall", "F1", "Support"],
+        [result],
+    )
-- 
GitLab