Commit a3752ba3 authored by Blanche Miret

Merge branch 'feature_total' into 'master'

Adding a line with the mean total for the precision, the recall and the f1...

See merge request teklia/nerval!12
parents 0061dab0 ac6c3400
Pipeline #103831 passed
@@ -514,7 +514,6 @@ def run(annotation: str, prediction: str, threshold: int, verbose: bool) -> dict
    Each measure is given at document level, global score is a micro-average across entity types.
    """
    # Get string and list of labels per character
    annot = parse_bio(annotation)
    predict = parse_bio(prediction)
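
The docstring above notes that the global score is a micro-average across entity types. As a reminder of what that means, here is a minimal, generic sketch (a hypothetical micro_average helper, not nerval's actual implementation): the true-positive, false-positive and false-negative counts of every entity type are pooled before precision, recall and F1 are computed once.

# Generic micro-average sketch (illustration only, not nerval's actual code):
# pool the counts of every entity type, then compute P, R and F1 once.
def micro_average(counts):
    """counts maps entity type -> (true_positives, false_positives, false_negatives)."""
    tp = sum(c[0] for c in counts.values())
    fp = sum(c[1] for c in counts.values())
    fn = sum(c[2] for c in counts.values())
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1

# With 11 pooled true positives, 3 false positives and 5 false negatives:
print(micro_average({"PER": (8, 2, 1), "LOC": (3, 1, 4)}))  # ~(0.786, 0.688, 0.733)
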
@@ -570,6 +569,10 @@ def run_multiple(file_csv, folder, threshold, verbose):
    if os.path.isdir(folder):
        list_bio_file = glob.glob(str(folder) + "/**/*.bio", recursive=True)
        count = 0
        precision = 0
        recall = 0
        f1 = 0
        for row in list_cor:
            annot = None
            predict = None
@@ -582,11 +585,24 @@ def run_multiple(file_csv, folder, threshold, verbose):
                    predict = file
            if annot and predict:
                count += 1
                print(os.path.basename(predict))
                run(annot, predict, threshold, verbose)
                scores = run(annot, predict, threshold, verbose)
                precision += scores["All"]["P"]
                recall += scores["All"]["R"]
                f1 += scores["All"]["F1"]
                print()
            else:
                raise f"No file found for files {annot}, {predict}"
                raise Exception(f"No file found for files {annot}, {predict}")
        if count:
            print(
                "Average scores in all corpus (mean of final files scores)\n"
                f" * Precision: {round(precision/count, 3)}\n"
                f" * Recall: {round(recall/count, 3)}\n"
                f" * F1: {round(f1/count, 3)}\n"
            )
        else:
            raise Exception("No file were counted")
    else:
        raise Exception("the path indicated does not lead to a folder.")