From 244ec626db8612b9f9a2f7d2fab1f7db048d9d3d Mon Sep 17 00:00:00 2001
From: Charlotte Mauvezin <charlotte.mauvezin@irht.cnrs.fr>
Date: Wed, 22 Dec 2021 15:22:53 +0100
Subject: [PATCH] Add a verbose option enabled by the -v flag

---
 nerval/evaluate.py | 35 ++++++++++++++++++++++++++++++++---
 1 file changed, 32 insertions(+), 3 deletions(-)

diff --git a/nerval/evaluate.py b/nerval/evaluate.py
index 6e2fef9..60c9a99 100644
--- a/nerval/evaluate.py
+++ b/nerval/evaluate.py
@@ -485,7 +485,25 @@ def print_results(scores: dict):
     tt.print(results, header, style=tt.styles.markdown)
 
 
-def run(annotation: str, prediction: str, threshold: int) -> dict:
+def print_result_compact(scores: dict):
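+    """Print only the global "All" scores as a single summary row."""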
+    result = []
+    header = ["tag", "predicted", "matched", "Precision", "Recall", "F1", "Support"]
+    result.append(
+        [
+            "ALl",
+            scores["All"]["predicted"],
+            scores["All"]["matched"],
+            round(scores["All"]["P"], 3),
+            round(scores["All"]["R"], 3),
+            round(scores["All"]["F1"], 3),
+            scores["All"]["Support"],
+        ]
+    )
+    tt.print(result, header, style=tt.styles.markdown)
+
+
+def run(annotation: str, prediction: str, threshold: int, verbose: bool = False) -> dict:
     """Compute recall and precision for each entity type found in annotation and/or prediction.
 
     Each measure is given at document level, global score is a micro-average across entity types.
@@ -528,7 +546,10 @@ def run(annotation: str, prediction: str, threshold: int) -> dict:
     scores = compute_scores(annot["entity_count"], predict["entity_count"], matches)
 
     # Print results
-    print_results(scores)
+    if verbose:
+        print_results(scores)
+    else:
+        print_result_compact(scores)
 
     return scores
 
@@ -564,9 +585,17 @@ def main():
         default=THRESHOLD,
         type=threshold_float_type,
     )
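+    # With -v/--verbose, print the detailed per-tag table; otherwise only the global summary row is printed.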
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        help="Print only the recap if False",
+        action="store_false",
+    )
+
     args = parser.parse_args()
 
-    run(args.annot, args.predict, args.threshold)
+    run(args.annot, args.predict, args.threshold, args.verbose)
 
 
 if __name__ == "__main__":
-- 
GitLab