From d506b249a38ee927de434a02e31d86ae096dbccc Mon Sep 17 00:00:00 2001
From: manonBlanco <blanco@teklia.com>
Date: Wed, 3 Jan 2024 14:47:10 +0100
Subject: [PATCH] Use early returns
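
Replace the nested if/else blocks in run_multiple() with guard clauses
that raise as soon as a precondition fails, so the main logic reads top
to bottom without extra indentation.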

---
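Reviewer note: the refactor applies the guard clause pattern, raising on
invalid input at the top of the function so the happy path stays at one
indentation level. A minimal, self-contained sketch of the pattern (the
process_folder function and its messages are illustrative, not nerval
APIs):

    from pathlib import Path

    def process_folder(folder: Path) -> list[str]:
        # Guard clause: bail out immediately instead of wrapping the
        # whole body in `if folder.is_dir(): ...`.
        if not folder.is_dir():
            raise ValueError(f"{folder} is not a folder")

        names = [path.name for path in folder.rglob("*.bio")]

        # Second guard: treat an empty result as an error before any
        # reporting code runs.
        if not names:
            raise ValueError("no .bio files found")

        return names

Keeping each failure mode next to the check that detects it also makes
the error messages easier to audit than a chain of trailing else blocks.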
 nerval/evaluate.py | 91 +++++++++++++++++++++++-----------------------
 1 file changed, 46 insertions(+), 45 deletions(-)

diff --git a/nerval/evaluate.py b/nerval/evaluate.py
index 5afe7cc..26f0118 100644
--- a/nerval/evaluate.py
+++ b/nerval/evaluate.py
@@ -349,48 +349,49 @@ def run_multiple(file_csv: Path, folder: Path, threshold: int, verbose: bool):
         csv_reader = reader(read_obj)
         list_cor = list(csv_reader)
 
-    if folder.is_dir():
-        list_bio_file = list(folder.rglob("*.bio"))
-
-        count = 0
-        precision = 0
-        recall = 0
-        f1 = 0
-        for row in list_cor:
-            annot = None
-            predict = None
-
-            for file in list_bio_file:
-                if row[0] == file.name:
-                    annot = file
-            for file in list_bio_file:
-                if row[1] == file.name:
-                    predict = file
-
-            if annot and predict:
-                count += 1
-                scores = run(annot, predict, threshold, verbose)
-                precision += scores["All"]["P"]
-                recall += scores["All"]["R"]
-                f1 += scores["All"]["F1"]
-            else:
-                raise Exception(f"No file found for files {row[0]}, {row[1]}")
-        if count:
-            logger.info("Average score on all corpus")
-            table = PrettyTable()
-            table.field_names = ["Precision", "Recall", "F1"]
-            table.set_style(MARKDOWN)
-            table.add_rows(
-                [
-                    [
-                        round(precision / count, 3),
-                        round(recall / count, 3),
-                        round(f1 / count, 3),
-                    ],
-                ],
-            )
-            print(table)
-        else:
-            raise Exception("No file were counted")
-    else:
-        raise Exception("the path indicated does not lead to a folder.")
+    if not folder.is_dir():
+        raise Exception("The path indicated does not lead to a folder.")
+
+    list_bio_file = list(folder.rglob("*.bio"))
+
+    count = 0
+    precision = 0
+    recall = 0
+    f1 = 0
+    for row in list_cor:
+        annot = None
+        predict = None
+
+        for file in list_bio_file:
+            if row[0] == file.name:
+                annot = file
+        for file in list_bio_file:
+            if row[1] == file.name:
+                predict = file
+
+        if not (annot and predict):
+            raise Exception(f"No file found for files {row[0]}, {row[1]}")
+
+        count += 1
+        scores = run(annot, predict, threshold, verbose)
+        precision += scores["All"]["P"]
+        recall += scores["All"]["R"]
+        f1 += scores["All"]["F1"]
+
+    if not count:
+        raise Exception("No file were counted")
+
+    logger.info("Average score on all corpus")
+    table = PrettyTable()
+    table.field_names = ["Precision", "Recall", "F1"]
+    table.set_style(MARKDOWN)
+    table.add_rows(
+        [
+            [
+                round(precision / count, 3),
+                round(recall / count, 3),
+                round(f1 / count, 3),
+            ],
+        ],
+    )
+    print(table)
-- 
GitLab