Adding a line with the mean total for the precision, the recall and the f1...

Merged Thibault Lavigne requested to merge feature_total into master
All threads resolved!
1 file  + 19  − 3
@@ -514,7 +514,6 @@ def run(annotation: str, prediction: str, threshold: int, verbose: bool) -> dict
     Each measure is given at document level, global score is a micro-average across entity types.
     """
     # Get string and list of labels per character
     annot = parse_bio(annotation)
     predict = parse_bio(prediction)
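
The docstring notes that the global score is a micro-average across entity types: true positives, false positives and false negatives are pooled over all types before a single precision, recall and F1 are computed. A minimal sketch of that pooling, assuming per-type TP/FP/FN counts (the helper name micro_average and the counts layout are illustrative, not taken from this repository):

def micro_average(counts: dict) -> dict:
    # Pool TP/FP/FN over all entity types, then compute one global P/R/F1.
    tp = sum(c["tp"] for c in counts.values())
    fp = sum(c["fp"] for c in counts.values())
    fn = sum(c["fn"] for c in counts.values())
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return {"P": precision, "R": recall, "F1": f1}

# Example: two entity types pooled into one document-level score.
print(micro_average({
    "PER": {"tp": 8, "fp": 2, "fn": 1},
    "LOC": {"tp": 5, "fp": 1, "fn": 4},
}))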
@@ -570,6 +569,10 @@ def run_multiple(file_csv, folder, threshold, verbose):
     if os.path.isdir(folder):
         list_bio_file = glob.glob(str(folder) + "/**/*.bio", recursive=True)
         count = 0
+        precision = 0
+        recall = 0
+        f1 = 0
         for row in list_cor:
             annot = None
             predict = None
@@ -582,11 +585,24 @@ def run_multiple(file_csv, folder, threshold, verbose):
                     predict = file
             if annot and predict:
                 count += 1
                 print(os.path.basename(predict))
-                run(annot, predict, threshold, verbose)
+                scores = run(annot, predict, threshold, verbose)
+                precision += scores["All"]["P"]
+                recall += scores["All"]["R"]
+                f1 += scores["All"]["F1"]
+                print()
             else:
-                raise f"No file found for files {annot}, {predict}"
+                raise Exception(f"No file found for files {annot}, {predict}")
+        if count:
+            print(
+                "Average scores in all corpus (mean of final files scores)\n"
+                f" * Precision: {round(precision/count, 3)}\n"
+                f" * Recall: {round(recall/count, 3)}\n"
+                f" * F1: {round(f1/count, 3)}\n"
+            )
+        else:
+            raise Exception("No file were counted")
     else:
         raise Exception("the path indicated does not lead to a folder.")