lambdaofgod committed on
Commit
c5a2694
1 Parent(s): 34c16bd

test metrics for best models

Browse files
app_implementation.py CHANGED
@@ -26,7 +26,7 @@ class RetrievalApp:
26
 
27
  def get_device_options(self):
28
  if self.is_cuda_available():
29
- return ["cpu", "cuda"]
30
  else:
31
  return ["cpu"]
32
 
 
26
 
27
def get_device_options(self):
    """Return the list of selectable compute devices, preferring CUDA.

    "cuda" is listed first when available so it becomes the default
    selection in the UI; "cpu" is always offered as a fallback.
    """
    devices = ["cpu"]
    if self.is_cuda_available():
        devices.insert(0, "cuda")
    return devices
32
 
assets/nbow_dependencies-nbow-nbow-mnrl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"accuracy@k": {"1": 0.3388, "3": 0.4821, "5": 0.5212, "10": 0.5896}, "precision@k": {"1": 0.3388, "3": 0.2888, "5": 0.2502, "10": 0.2088}, "recall@k": {"1": 0.0481, "3": 0.0953, "5": 0.1204, "10": 0.1725}, "ndcg@k": {"10": 0.2811}, "mrr@k": {"10": 0.419}, "map@k": {"10": 0.2044}, "columns": "dependencies", "model_name": "nbow_dependencies-nbow-nbow-mnrl"}
assets/nbow_readme-nbow-nbow-mnrl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"accuracy@k": {"1": 0.3225, "3": 0.4821, "5": 0.5537, "10": 0.6189}, "precision@k": {"1": 0.3225, "3": 0.2899, "5": 0.2495, "10": 0.2007}, "recall@k": {"1": 0.0451, "3": 0.0982, "5": 0.1281, "10": 0.1706}, "ndcg@k": {"10": 0.2748}, "mrr@k": {"10": 0.4175}, "map@k": {"10": 0.1938}, "columns": "readme", "model_name": "nbow_readme-nbow-nbow-mnrl"}
assets/nbow_readme_dependencies-nbow-nbow-mnrl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"accuracy@k": {"1": 0.4235, "3": 0.5375, "5": 0.5896, "10": 0.6482}, "precision@k": {"1": 0.4235, "3": 0.342, "5": 0.3199, "10": 0.2557}, "recall@k": {"1": 0.063, "3": 0.1208, "5": 0.1642, "10": 0.2273}, "ndcg@k": {"10": 0.3508}, "mrr@k": {"10": 0.4955}, "map@k": {"10": 0.2688}, "columns": "readme_dependencies", "model_name": "nbow_readme_dependencies-nbow-nbow-mnrl"}
assets/nbow_titles-nbow-nbow-mnrl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"accuracy@k": {"1": 0.6645, "3": 0.7622, "5": 0.8046, "10": 0.8306}, "precision@k": {"1": 0.6645, "3": 0.5765, "5": 0.5121, "10": 0.4075}, "recall@k": {"1": 0.1201, "3": 0.2775, "5": 0.3462, "10": 0.4477}, "ndcg@k": {"10": 0.6024}, "mrr@k": {"10": 0.7198}, "map@k": {"10": 0.5262}, "columns": "titles", "model_name": "nbow_titles-nbow-nbow-mnrl"}
assets/nbow_titles_dependencies-nbow-nbow-mnrl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"accuracy@k": {"1": 0.4169, "3": 0.5179, "5": 0.57, "10": 0.6352}, "precision@k": {"1": 0.4169, "3": 0.3333, "5": 0.3003, "10": 0.241}, "recall@k": {"1": 0.0537, "3": 0.107, "5": 0.1518, "10": 0.2045}, "ndcg@k": {"10": 0.3294}, "mrr@k": {"10": 0.4847}, "map@k": {"10": 0.2493}, "columns": "titles_dependencies", "model_name": "nbow_titles_dependencies-nbow-nbow-mnrl"}
pages/2_Statistics.py CHANGED
@@ -1,6 +1,30 @@
1
  import pandas as pd
2
  import streamlit as st
3
  import config
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
  best_results_df = pd.read_csv(config.best_tasks_path)
6
 
 
1
  import pandas as pd
2
  import streamlit as st
3
  import config
4
+ from pathlib import Path as P
5
+ import json
6
+
7
+
8
# Materialize and sort the glob results: Path.glob returns a one-shot
# generator (a second iteration would silently yield nothing) and its
# order is filesystem-dependent; a sorted list gives a stable, reusable view.
nbow_results_path = sorted(P("assets").glob("nbow*"))
9
+
10
def display_metrics_dict(metrics, display_only_accuracy):
    """Render one model's test metrics in the Streamlit page.

    Parameters
    ----------
    metrics : dict
        Metrics for a single model; expected to contain "model_name" and
        "columns" keys alongside the metric dicts (e.g. "accuracy@k").
    display_only_accuracy : bool
        When True, show only accuracy@10; otherwise show the full dict.
    """
    # Work on a shallow copy so the pops below don't mutate the caller's dict.
    metrics = dict(metrics)
    model_name = metrics.pop("model_name")
    columns = metrics.pop("columns").split("_")
    st.markdown(f"### columns: {columns}")
    st.markdown(f"best model {model_name}")
    if display_only_accuracy:
        st.json({"accuracy@10": metrics["accuracy@k"]["10"]})
    else:
        st.json(metrics)
19
+
20
def display_metrics():
    """Render test metrics for every nbow results file under assets/.

    A sidebar checkbox toggles between the full metrics dict and a
    compact accuracy@10-only view.
    """
    display_only_accuracy = st.sidebar.checkbox("display only accuracy@10", value=True)
    st.markdown("## Test metrics for best validation model on given columns")
    for p in nbow_results_path:
        # read_text opens and closes the file; the previous bare
        # open(p, "r").read() leaked the file handle.
        metrics = json.loads(p.read_text())
        display_metrics_dict(metrics, display_only_accuracy)
26
# Render the per-model test-metrics section at the top of the Statistics page.
display_metrics()

# Load the best-models-per-task summary table used further down the page.
best_results_df = pd.read_csv(config.best_tasks_path)
30