orionweller committed on
Commit
ef64719
1 Parent(s): 804b9f6

add some examples

Browse files
Files changed (2) hide show
  1. app.py +18 -2
  2. requirements.txt +1 -0
app.py CHANGED
@@ -1,14 +1,30 @@
1
  import gradio as gr
2
  import gem_metrics
 
 
3
 
4
- def calc_rouge_score(pred, gold):
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  preds = gem_metrics.texts.Predictions(pred)
 
6
  refs = gem_metrics.texts.References(gold)
7
  result = gem_metrics.compute(preds, refs, metrics_list=['bleu', 'rouge'])
8
  return result
9
 
10
  iface = gr.Interface(
11
  fn=calc_rouge_score,
12
- inputs=["text", "text"],
13
  outputs=["text"])
14
  iface.launch()
1
  import gradio as gr
2
  import gem_metrics
3
+ from datasets import load_dataset
4
+ import random
5
 
6
# Sample question(s) from the ELI5 validation split to serve as dropdown choices.
random.seed(42)  # fixed seed so the sampled questions are stable across app restarts

raw_datasets = load_dataset("eli5")
# BUG FIX 1: the ELI5 split is named "validation_eli5" (the same split is indexed
# below) — "validation" is not a split of this dataset.
validation_len = len(raw_datasets["validation_eli5"])
# BUG FIX 2: random.randint requires two bounds (a, b); randrange(n) draws a
# single index uniformly from [0, n), which is what was intended here.
random_insts = [random.randrange(validation_len)]
questions = []  # dropdown choices shown in the UI
q2ref = {}      # question title -> list of reference answer texts
for inst in random_insts:
    question = raw_datasets["validation_eli5"][inst]["title"]
    ref = raw_datasets["validation_eli5"][inst]["answers"]["text"]
    questions.append(question)
    q2ref[question] = ref
18
+
19
def calc_rouge_score(og_question, pred):
    """Score *pred* against the stored reference answers for *og_question*.

    Looks up the gold references in the module-level ``q2ref`` map and returns
    the GEM-metrics result dict for BLEU and ROUGE.
    """
    gold = q2ref[og_question]
    predictions = gem_metrics.texts.Predictions(pred)
    references = gem_metrics.texts.References(gold)
    return gem_metrics.compute(predictions, references, metrics_list=['bleu', 'rouge'])
25
 
26
# Gradio UI: pick one of the sampled questions, paste a candidate answer,
# and get the BLEU/ROUGE result back as text.
demo_inputs = [gr.Dropdown(questions), "text"]
iface = gr.Interface(
    fn=calc_rouge_score,
    inputs=demo_inputs,
    outputs=["text"])
iface.launch()
requirements.txt CHANGED
@@ -1 +1,2 @@
1
  git+https://github.com/GEM-benchmark/GEM-metrics.git
 
1
  git+https://github.com/GEM-benchmark/GEM-metrics.git
2
+ datasets