Skip to content
GitLab
Explore
Sign in
Register
Primary navigation
Search or go to…
Project
nerval
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Package registry
Container Registry
Model registry
Operate
Environments
Terraform modules
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
Named Entity Recognition
nerval
Commits
8398b7c8
Commit
8398b7c8
authored
1 year ago
by
Eva Bardou
Committed by
Yoann Schneider
1 year ago
Browse files
Options
Downloads
Patches
Plain Diff
Utils method to print a Markdown table
parent
917410f5
No related branches found
No related tags found
1 merge request
!39
Utils method to print a Markdown table
Pipeline
#150626
passed
1 year ago
Stage: test
Stage: release
Changes
2
Pipelines
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
nerval/evaluate.py
+7
-15
7 additions, 15 deletions
nerval/evaluate.py
nerval/utils.py
+26
-24
26 additions, 24 deletions
nerval/utils.py
with
33 additions
and
39 deletions
nerval/evaluate.py
+
7
−
15
View file @
8398b7c8
...
...
@@ -5,7 +5,6 @@ from typing import List
import
editdistance
import
edlib
from
prettytable
import
MARKDOWN
,
PrettyTable
from
nerval.parse
import
(
BEGINNING_POS
,
...
...
@@ -15,7 +14,7 @@ from nerval.parse import (
look_for_further_entity_part
,
parse_bio
,
)
from
nerval.utils
import
print_result_compact
,
print_results
from
nerval.utils
import
print_markdown_table
,
print_result_compact
,
print_results
logger
=
logging
.
getLogger
(
__name__
)
...
...
@@ -391,16 +390,9 @@ def run_multiple(file_csv: Path, folder: Path, threshold: int, verbose: bool):
raise
Exception
(
"
No file were counted
"
)
logger
.
info
(
"
Average score on all corpus
"
)
table
=
PrettyTable
()
table
.
field_names
=
[
"
Precision
"
,
"
Recall
"
,
"
F1
"
]
table
.
set_style
(
MARKDOWN
)
table
.
add_rows
(
[
[
round
(
precision
/
count
,
3
),
round
(
recall
/
count
,
3
),
round
(
f1
/
count
,
3
),
],
],
)
print
(
table
)
result
=
[
round
(
precision
/
count
,
3
),
round
(
recall
/
count
,
3
),
round
(
f1
/
count
,
3
),
]
print_markdown_table
([
"
Precision
"
,
"
Recall
"
,
"
F1
"
],
[
result
])
This diff is collapsed.
Click to expand it.
nerval/utils.py
+
26
−
24
View file @
8398b7c8
from
prettytable
import
MARKDOWN
,
PrettyTable
def
print_results
(
scores
:
dict
):
def print_markdown_table(header: list[str], rows: list[list]) -> None:
    """Print a Markdown-formatted table built from *header* and *rows*.

    :param header: Column names for the table.
    :param rows: Table body; one inner list per row, aligned with ``header``.
    """
    markdown_table = PrettyTable()
    markdown_table.set_style(MARKDOWN)
    markdown_table.field_names = header
    markdown_table.add_rows(rows)
    print(markdown_table)
def
print_results
(
scores
:
dict
)
->
None
:
"""
Display final results.
None values are kept to indicate the absence of a certain tag in either annotation or prediction.
"""
header
=
[
"
tag
"
,
"
predicted
"
,
"
matched
"
,
"
Precision
"
,
"
Recall
"
,
"
F1
"
,
"
Support
"
]
results
=
[]
for
tag
in
sorted
(
scores
,
reverse
=
True
):
prec
=
None
if
scores
[
tag
][
"
P
"
]
is
None
else
round
(
scores
[
tag
][
"
P
"
],
3
)
...
...
@@ -25,29 +33,23 @@ def print_results(scores: dict):
],
)
table
=
PrettyTable
()
table
.
field_names
=
header
table
.
set_style
(
MARKDOWN
)
table
.
add_rows
(
results
)
print
(
table
)
print_markdown_table
(
[
"
tag
"
,
"
predicted
"
,
"
matched
"
,
"
Precision
"
,
"
Recall
"
,
"
F1
"
,
"
Support
"
],
results
,
)
def
print_result_compact
(
scores
:
dict
):
header
=
[
"
tag
"
,
"
predicted
"
,
"
matched
"
,
"
Precision
"
,
"
Recall
"
,
"
F1
"
,
"
Support
"
]
def print_result_compact(scores: dict) -> None:
    """Display a one-row summary table for the aggregated "All" entry.

    :param scores: Mapping of tag name to its metrics. Must contain an "All"
        key whose value provides "predicted", "matched", "P", "R", "F1" and
        "Support" entries (the P/R/F1 values are rounded to 3 decimals).
    """
    overall = scores["All"]
    row = [
        "All",
        overall["predicted"],
        overall["matched"],
        round(overall["P"], 3),
        round(overall["R"], 3),
        round(overall["F1"], 3),
        overall["Support"],
    ]
    print_markdown_table(
        ["tag", "predicted", "matched", "Precision", "Recall", "F1", "Support"],
        [row],
    )
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment