SudharsanSundar committed
Commit 35a1018
1 Parent(s): 0b9a1f2

Simple readme display now

Files changed (1): app.py (+54 -17)
app.py CHANGED
@@ -2,24 +2,61 @@ import evaluate
  import gradio as gr
  from evaluate.utils import launch_gradio_widget

- # token_edit_distance_metric = evaluate.load("SudharsanSundar/token_edit_distance")
- # launch_gradio_widget(module)
-
-
- def evaluate_metric(pred, ref):
-     pred = map(int, pred)
-     ref = map(int, ref)
-     return token_edit_distance_metric.compute(predictions=[pred], references=[ref])['avg_token_edit_distance']
-
-
- demo = gr.Interface(
-     fn=evaluate_metric,
-     inputs=[gr.Dataframe(row_count=(4, "dynamic"),
-                          col_count=(2, "fixed"),
-                          label="Input Data",
-                          interactive=1,
-                          headers=['Predictions', 'References'],
-                          datatype="number")],
-     outputs="number"
- )
- demo.launch()
+ with gr.Blocks() as demo:
+     gr.Markdown(
+         """
+         **Token Edit Distance**
+         This is an NLP evaluation metric that records the minimum number of token edits (insertions, deletions, and replacements, all weighted equally) that must be made to the prediction sequence in order to make it exactly match the reference sequence. It uses the same logic as Levenshtein edit distance, except applied to tokens (i.e., individual ints in a list) rather than to individual characters in a string.
+         <br/><br/>
+         *Args*:
+         * predictions: ```List[List[Int]]```, list of predictions to score.
+             * Each prediction should be tokenized into a list of tokens.
+         * references: ```List[List[Int]]```, list of references/ground-truth outputs to score against.
+             * Each reference should be tokenized into a list of tokens.
+
+         <br/><br/>
+
+         *Returns*:
+         * "avg_token_edit_distance": ```Float```, the average Token Edit Distance over all input predictions and references.
+         * "token_edit_distances": ```List[Int]```, the Token Edit Distance for each input prediction/reference pair.
+
+         <br/><br/>
+
+         *Examples*:
+         ```
+         >>> token_edit_distance_metric = datasets.load_metric('Token Edit Distance')
+         >>> references = [[15, 4243], [100, 10008]]
+         >>> predictions = [[15, 4243], [100, 10009]]
+         >>> results = token_edit_distance_metric.compute(predictions=predictions, references=references)
+         >>> print(results)
+         {'avg_token_edit_distance': 0.5, 'token_edit_distances': array([0., 1.])}
+         ```
+         """)
+
+
+ if __name__ == "__main__":
+     demo.launch()
+
+ # JUNKYARD
+ # token_edit_distance_metric = evaluate.load("SudharsanSundar/token_edit_distance")
+ # launch_gradio_widget(module)
+ #
+ # def evaluate_metric(table):
+ #     pred = table[]
+ #     pred = map(int, pred)
+ #     ref = map(int, ref)
+ #     return token_edit_distance_metric.compute(predictions=[pred], references=[ref])['avg_token_edit_distance']
+ #
+ #
+ # demo = gr.Interface(
+ #     fn=evaluate_metric,
+ #     inputs=[gr.Dataframe(row_count=(4, "dynamic"),
+ #                          col_count=(2, "fixed"),
+ #                          label="Input Data",
+ #                          interactive=1,
+ #                          headers=['Predictions', 'References'],
+ #                          datatype="number")],
+ #     outputs="number",
+ #     description=""
+ # )
+ # demo.launch()
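
The README added in this commit describes standard Levenshtein distance computed over token IDs rather than characters. The metric's actual implementation is not part of this diff; the snippet below is only a minimal sketch of that computation, reproducing the averaged and per-example values the README documents (function names here are illustrative, not the module's internals):

```python
from typing import Dict, List


def token_edit_distance(pred: List[int], ref: List[int]) -> int:
    """Minimum number of insertions, deletions, and replacements (all cost 1)
    needed to turn `pred` into `ref`: Levenshtein distance over token IDs."""
    m, n = len(pred), len(ref)
    prev = list(range(n + 1))  # distances between pred[:0] and every prefix of ref
    for i in range(1, m + 1):
        curr = [i] + [0] * n
        for j in range(1, n + 1):
            cost = 0 if pred[i - 1] == ref[j - 1] else 1
            curr[j] = min(prev[j] + 1,         # delete pred[i-1]
                          curr[j - 1] + 1,     # insert ref[j-1]
                          prev[j - 1] + cost)  # replace (or keep on match)
        prev = curr
    return prev[n]


def compute_token_edit_distances(predictions: List[List[int]],
                                 references: List[List[int]]) -> Dict:
    """Mirrors the output format documented above: per-example distances plus their average."""
    distances = [token_edit_distance(p, r) for p, r in zip(predictions, references)]
    return {
        "avg_token_edit_distance": sum(distances) / len(distances),
        "token_edit_distances": distances,
    }


# Matches the README example: one exact match (distance 0) and one single-token replacement (distance 1).
print(compute_token_edit_distances(predictions=[[15, 4243], [100, 10009]],
                                   references=[[15, 4243], [100, 10008]]))
# -> {'avg_token_edit_distance': 0.5, 'token_edit_distances': [0, 1]}
```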
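The commented-out JUNKYARD block above gestures at an interactive gr.Interface over a Dataframe but was left unfinished (`evaluate_metric(table)` never extracts the columns). A hedged sketch of one way it could be completed, assuming the metric loads from the Hub as in the commented code, that gr.Dataframe passes the fn a pandas DataFrame whose column names follow `headers`, and that each row holds one prediction/reference token pair:

```python
# Sketch only: not part of this commit.
import evaluate
import gradio as gr

# Assumes the metric resolves on the Hub, as the JUNKYARD comments attempt.
token_edit_distance_metric = evaluate.load("SudharsanSundar/token_edit_distance")


def evaluate_metric(table):
    # `table` arrives as a pandas DataFrame with the configured headers;
    # the two columns together form a single prediction/reference pair.
    pred = [int(x) for x in table["Predictions"]]
    ref = [int(x) for x in table["References"]]
    result = token_edit_distance_metric.compute(predictions=[pred], references=[ref])
    return result["avg_token_edit_distance"]


demo = gr.Interface(
    fn=evaluate_metric,
    inputs=[gr.Dataframe(row_count=(4, "dynamic"),
                         col_count=(2, "fixed"),
                         label="Input Data",
                         interactive=True,
                         headers=["Predictions", "References"],
                         datatype="number")],
    outputs="number",
    description="Average Token Edit Distance for the tokens entered above (illustrative).",
)

if __name__ == "__main__":
    demo.launch()
```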