Skip to content
Snippets Groups Projects
Commit 244ec626 authored by Charlotte Mauvezin's avatar Charlotte Mauvezin
Browse files

Adding verbose option launched by adding -v

parent 5940c8d9
No related branches found
No related tags found
1 merge request!11Adding verbose option launched by adding -v
Pipeline #103818 failed
......@@ -485,7 +485,24 @@ def print_results(scores: dict):
tt.print(results, header, style=tt.styles.markdown)
def run(annotation: str, prediction: str, threshold: int) -> dict:
def print_result_compact(scores: dict) -> None:
    """Print a one-row summary table of the micro-averaged global scores.

    Only the "All" entry of *scores* is shown (compact mode, used when the
    verbose flag is off); per-tag rows are handled by ``print_results``.

    Args:
        scores: Mapping of tag name to metric dict. Must contain an "All"
            entry with keys "predicted", "matched", "P", "R", "F1" and
            "Support" (as produced by ``compute_scores``).
    """
    result = []
    header = ["tag", "predicted", "matched", "Precision", "Recall", "F1", "Support"]
    result.append(
        [
            # Fixed label typo: was "ALl"; every lookup below uses "All".
            "All",
            scores["All"]["predicted"],
            scores["All"]["matched"],
            round(scores["All"]["P"], 3),
            round(scores["All"]["R"], 3),
            round(scores["All"]["F1"], 3),
            scores["All"]["Support"],
        ]
    )
    # tt is the table-rendering module imported at file level (termtables-style API).
    tt.print(result, header, style=tt.styles.markdown)
def run(annotation: str, prediction: str, threshold: int, verbose: bool) -> dict:
"""Compute recall and precision for each entity type found in annotation and/or prediction.
Each measure is given at document level, global score is a micro-average across entity types.
......@@ -528,7 +545,10 @@ def run(annotation: str, prediction: str, threshold: int) -> dict:
scores = compute_scores(annot["entity_count"], predict["entity_count"], matches)
# Print results
print_results(scores)
if verbose:
print_results(scores)
else:
print_result_compact(scores)
return scores
......@@ -564,9 +584,16 @@ def main():
default=THRESHOLD,
type=threshold_float_type,
)
parser.add_argument(
"-v",
"--verbose",
help="Print only the recap if False",
action="store_false",
)
args = parser.parse_args()
run(args.annot, args.predict, args.threshold)
run(args.annot, args.predict, args.threshold, args.verbose)
if __name__ == "__main__":
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment