Vipitis committed on
Commit 681b67d
1 Parent(s): 9d3e407

added a demo to the app

Files changed (3)
  1. .gitignore +1 -0
  2. ShaderEval.py +2 -2
  3. app.py +35 -4
.gitignore ADDED
@@ -0,0 +1 @@
+*.pyc
ShaderEval.py CHANGED
@@ -158,7 +158,7 @@ class ReturnGenerationEvaluator(evaluate.TextGenerationEvaluator):
 class Suite(evaluate.EvaluationSuite):
 
 
-    def __init__(self, name):
+    def __init__(self, name, snippet: int = 100):
         super().__init__(name)
         self.preprocessor = lambda x: {"return_statement": x["return_statement"].split(";")[0]}  # like this? refactored to ReturnGenerationEvaluator
         self.suite = [
@@ -167,7 +167,7 @@ class Suite(evaluate.EvaluationSuite):
             task_type="text-generation",  # this calls an evaluator, but can you specify your own custom evaluator instead?
             data="Vipitis/Shadertoys-fine",
             subset="return_completion",
-            split="test[5:10]",  # [5:10] keeps testing quick, and those are easy examples, unlike the first 5.
+            split=f"test[:{snippet}]",  # selects a subset of the data during testing, perhaps remove later?
             args_for_task={
                 # "metric": "exact_match",
                 "input_column": "body",
app.py CHANGED
@@ -1,7 +1,38 @@
 import gradio as gr
+import evaluate
+from ShaderEval import Suite
 
-def greet(name):
-    return "Hello " + name + "!!\n This space hosts the ShaderEval Suite. more to follow soon."
+# suite = evaluate.EvaluationSuite.load("Vipitis/ShaderEval")  # downloads it
+suite = Suite("Vipitis/ShaderEval")  # local to the space?
 
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch()
+def run_suite(model_cp, snippet):
+    suite = Suite("Vipitis/ShaderEval", snippet)
+    results = suite.run(model_cp)
+    return results[0]["exact_match"]
+
+with gr.Blocks() as site:
+    text = gr.Markdown("""# Welcome to the ShaderEval Suite.
+
+This space hosts the ShaderEval Suite. More to follow soon.
+
+## Instructions
+### Run the code yourself:
+```python
+import evaluate
+suite = evaluate.EvaluationSuite.load("Vipitis/ShaderEval")
+model_cp = "gpt2"
+suite.run(model_cp)
+```
+
+### Try the demo below
+- Enter a **model checkpoint** in the textbox
+- Select how many **samples** to run (there are up to 100 in the test set)
+- Click **Run** to run the suite
+- The results will be displayed in the **Output** box
+""")
+    model_cp = gr.Textbox(label="Model Checkpoint")
+    first_n = gr.Slider(minimum=1, maximum=100, value=10, label="num_samples", step=1.0)
+    output = gr.Textbox(label="Output")
+    run_button = gr.Button("Run")
+    run_button.click(fn=run_suite, inputs=[model_cp, first_n], outputs=output)
+site.launch()
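For reference, the Run button just calls `run_suite`, which re-instantiates the suite with the chosen sample count. A hedged smoke test of the same path without the Gradio UI, assuming the Space's files and dependencies are available locally (`gpt2` is only an example checkpoint):

```python
# Sketch: mirrors run_suite() above, outside the web demo.
from ShaderEval import Suite

suite = Suite("Vipitis/ShaderEval", snippet=5)  # evaluate on the first 5 test samples
results = suite.run("gpt2")                     # any text-generation checkpoint
print(results[0]["exact_match"])
```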