JP-SystemsX committed
Commit 8d7d29c
Parent: 58bc693

Some tests to better understand compute score

Files changed (2):
  1. Testing.py +18 -0
  2. requirements.txt +3 -2
Testing.py ADDED
@@ -0,0 +1,18 @@
+ import evaluate as ev
+ from nDCG import nDCG
+
+ metric = nDCG(cache_dir="cache")
+ a = [1,2,3,4,5]
+ b = [1,2,3,4,5]
+ c = [1,2,3,4,0]
+
+
+ #metric.add(prediction=a, reference=b)
+ metric.add(prediction=c, reference=b)
+ metric.add(prediction=c, reference=b)
+ metric.add(prediction=c, reference=b)
+ print(metric.compute(predictions=[a], references=[b]))
+ print(metric.compute(predictions=[a], references=[c]))
+ print(metric.compute(predictions=[a], references=[c]))
+ print(metric.compute(predictions=[a,a], references=[c,a]))
+ print(metric.cache_file_name)
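
For context: the calls above exercise `evaluate`'s `add`/`compute` API to see how previously queued examples interact with the `predictions`/`references` passed directly to `compute`. Below is a minimal, self-contained sketch of that interaction, using the built-in `accuracy` metric instead of the repo's custom `nDCG` module so it runs standalone; the metric choice and the accumulation behaviour described in the comments are assumptions about how `evaluate` modules generally behave, not something this commit confirms.

import evaluate

# Load a stock metric so the sketch does not depend on the custom nDCG module.
metric = evaluate.load("accuracy")

# Queue individual examples; nothing is scored yet.
metric.add(prediction=0, reference=1)
metric.add(prediction=1, reference=1)

# Passing predictions/references to compute() presumably appends that batch
# to the queued examples and scores everything together, which is the
# interaction the repeated compute() calls in Testing.py probe.
print(metric.compute(predictions=[1, 0], references=[1, 0]))

# A second compute() call should then start again from an empty queue.
print(metric.compute(predictions=[1, 1], references=[1, 0]))
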
requirements.txt CHANGED
@@ -1,2 +1,3 @@
- git+https://github.com/huggingface/evaluate@dd35e04844c71068318335e05d8a658200dafe89
- scikit-learn
+ scikit-learn
+ datasets
+ evaluate