holly123 committed
Commit 31a2f8c
1 Parent(s): 9299d17

Create app.py

Files changed (1)
  1. app.py +63 -0
app.py ADDED
@@ -0,0 +1,63 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed, pipeline
+
+
+ title = "Code Generator"
+ description = "This is a space to convert English text to Python code using [codeparrot-small-text-to-code](https://huggingface.co/codeparrot/codeparrot-small-text-to-code), \
+ a code generation model for Python fine-tuned on [github-jupyter-text](https://huggingface.co/datasets/codeparrot/github-jupyter-text), a dataset of docstrings \
+ and their Python code extracted from Jupyter notebooks."
+ example = [
+     ["Utility function to compute the accuracy of predictions using metric from sklearn", 65, 0.6, 42],
+     ["Let's implement a function that computes the size of a file called filepath", 60, 0.6, 42],
+     ["Let's implement bubble sort in a helper function:", 87, 0.6, 42],
+ ]
+
+ # load the fine-tuned text-to-code model and its tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("codeparrot/codeparrot-small-text-to-code")
+ model = AutoModelForCausalLM.from_pretrained("codeparrot/codeparrot-small-text-to-code")
+
+ def make_docstring(gen_prompt):
+     return "\"\"\"\n" + gen_prompt + "\n\"\"\"\n\n"
+
+ def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
+     set_seed(seed)
+     pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+     prompt = make_docstring(gen_prompt)
+     generated_text = pipe(prompt, do_sample=True, top_p=0.95, temperature=temperature, max_new_tokens=max_tokens)[0]['generated_text']
+     return generated_text
+
+
+ iface = gr.Interface(
+     fn=code_generation,
+     inputs=[
+         gr.Code(lines=10, language="python", label="English instructions"),
+         gr.Slider(
+             minimum=8,
+             maximum=256,
+             step=1,
+             value=8,
+             label="Number of tokens to generate",
+         ),
+         gr.Slider(
+             minimum=0,
+             maximum=2.5,
+             step=0.1,
+             value=0.6,
+             label="Temperature",
+         ),
+         gr.Slider(
+             minimum=0,
+             maximum=1000,
+             step=1,
+             value=42,
+             label="Random seed to use for the generation"
+         )
+     ],
+     outputs=gr.Code(label="Predicted Python code", language="python", lines=10),
+     examples=example,
+     layout="horizontal",
+     theme="peach",
+     description=description,
+     title=title
+ )
+ iface.launch()