Commit 5ddcd259 authored Jan 30, 2018 by Prot Alexandre
adding evaluation metrics
parent 4f6a914f

Showing 2 changed files with 45 additions and 5 deletions (+45 -5)
README.md (+3 -1)
gogole/commands/eval_command.py (+42 -4)
README.md

@@ -72,6 +72,8 @@ gogole > search -v --norm-freq <query> # normalized frequencies
On the cacm collection only, you can evaluate the relevance of the vector-space searches performed with the different weighting schemes.
```python
gogole > eval <n>    # where n is the number of queries to evaluate
```
NB: the evaluation also requires that the index has been built with the `index build` command.
\ No newline at end of file
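For completeness, a plausible end-to-end session in the README's own command style (hypothetical transcript; output elided, and it assumes the cacm collection is the one selected):

```python
gogole > index build    # build the index first, as required by eval
gogole > eval 10        # evaluate the first 10 cacm queries
```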
gogole/commands/eval_command.py

@@ -3,17 +3,23 @@ from gogole.query import vectorial_query
```python
from gogole.parser import CACMParser
from gogole.parser import QRelsParser


def run(collection, args):
    # Runs the CACM Parser on the queries file with the same structure
    cacm_parser = CACMParser("data/query.text")

    nrequests = int(args.nrequests[0])

    if nrequests < 1:
        print("Please enter a non-zero number of test queries")
        return

    qrels_parser = QRelsParser()
    relevant_docs_by_query = qrels_parser.parse_all(nrequests)

    for weight_type in vectorial_query.WEIGHTING_TYPES:
        precision_sum = 0
        avg_mean_precision_sum = 0
        recall_sum = 0
        nb_queries = 0
```
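`parse_all` itself is not shown in this diff, but from its usage below (`relevant_docs_by_query[document.document_id]`) it presumably returns a mapping from query id to the document ids judged relevant for that query. A minimal sketch of that assumed shape, with made-up ids:

```python
# Hypothetical illustration only -- the real data comes from QRelsParser
# reading the cacm relevance judgments.
# Assumed shape: query id -> ids of documents judged relevant for that query.
relevant_docs_by_query = {
    1: [1410, 1572, 1605],
    2: [2434, 2863],
}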
@@ -26,11 +32,11 @@ def run(collection, args):
```python
            all_results, t = query_browser.timed_search(q)
            n_results = [res for idx, res in enumerate(
                query_browser.find_n_first_elements(all_results, n=10), start=1)]

            # If there is nothing for this query id, drop it
            if document.document_id not in relevant_docs_by_query:
                continue

            relevant_docs = relevant_docs_by_query[document.document_id]
            intersection_docs = [res for res in n_results if res in relevant_docs]
```
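The evaluation is set-based over the top 10 results: precision is |retrieved ∩ relevant| / |retrieved| and recall is |retrieved ∩ relevant| / |relevant| (the latter computed in the next hunk). A tiny self-contained check with made-up document ids:

```python
# Toy example (hypothetical ids), mirroring the intersection logic above.
n_results = [12, 7, 99, 4]        # top-ranked results for one query
relevant_docs = [7, 4, 55]        # qrels judgments for that query
intersection_docs = [res for res in n_results if res in relevant_docs]

precision = len(intersection_docs) / len(n_results)    # 2/4 = 0.5
recall = len(intersection_docs) / len(relevant_docs)   # 2/3 ~= 0.667
print(precision, recall)
```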
@@ -41,11 +47,43 @@ def run(collection, args):
```python
                precision = 0  # tail of an if/else in the unchanged context above

            recall = len(intersection_docs) / len(relevant_docs)
            avg_mean_precision = compute_avg_mean_precision(n_results, relevant_docs)

            precision_sum += precision
            recall_sum += recall
            avg_mean_precision_sum += avg_mean_precision
            nb_queries += 1

        precision = precision_sum / nb_queries
        recall = recall_sum / nb_queries

        print("for weight {weight}: precision: {precision}, recall: {recall}"
              .format(weight=weight_type, precision=precision, recall=recall))

        avg_mean_precision = avg_mean_precision_sum / nb_queries

        print("\n\n{:*^50}\n".format(
            " Evaluation metrics for weight {weight_type} ".format(weight_type=weight_type)))
        print("precision:\t{0:.3g}".format(precision))
        print("recall:\t{0:.3g}".format(recall))
        print("MA Precision:\t{0:.3g}".format(avg_mean_precision))

        if precision * recall != 0:
            beta = precision / recall
            e_measure = 1 - ((beta ** 2) + 1) * precision * recall / ((beta ** 2) * precision + recall)
            f_measure = 1 - e_measure  # computed but unused; only F1 is printed below
            f1_measure = (2 * precision * recall) / (precision + recall)

            print("E-Measure:\t{0:.3g}".format(e_measure))
            print("F1-Measure:\t{0:.3g}".format(f1_measure))
```
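The E-measure here matches van Rijsbergen's definition, E = 1 - ((beta^2 + 1)·P·R) / (beta^2·P + R), with the choice beta = precision / recall; the resulting F = 1 - E then differs from the balanced F1 = 2PR / (P + R) whenever beta != 1, which is presumably why F1 is printed separately. A quick numeric sanity check with sample values (not real evaluation output):

```python
# Worked check with sample values, following the formulas above.
precision, recall = 0.4, 0.25
beta = precision / recall                     # 1.6
e_measure = 1 - ((beta ** 2) + 1) * precision * recall / ((beta ** 2) * precision + recall)
f_measure = 1 - e_measure                     # ~0.279, weighted toward precision
f1_measure = 2 * precision * recall / (precision + recall)  # ~0.308
print(e_measure, f_measure, f1_measure)
```

The new `compute_avg_mean_precision` helper is added at the bottom of the same hunk: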
```python
def compute_avg_mean_precision(doc_ids, relevant_doc_ids):
    idx = 0
    nb_good_results = 0
    avg_mean_precision = 0

    for res in doc_ids:
        idx += 1
        if res in relevant_doc_ids:
            nb_good_results += 1
            avg_mean_precision += nb_good_results / idx

    if nb_good_results > 0:
        avg_mean_precision = avg_mean_precision / nb_good_results

    return avg_mean_precision
```
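Despite its name, `compute_avg_mean_precision` computes the average precision of a single ranking: precision is sampled at each rank where a relevant document appears, then averaged over the number of hits found (a slight variant of textbook AP, which normalizes by the total number of relevant documents). A small worked example with made-up ids:

```python
# Hits at ranks 1 and 3: precisions 1/1 and 2/3, averaged over 2 hits.
print(compute_avg_mean_precision([42, 13, 7, 99], [42, 7]))  # (1.0 + 0.667) / 2 ~= 0.833
```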