lvwerra HF staff committed on
Commit
b161d65
1 Parent(s): 6b12960

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +62 -33
app.py CHANGED
@@ -18,36 +18,65 @@ def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
18
  return generated_text
19
 
20
 
21
- demo = gr.Blocks()
22
- with demo:
23
- with gr.Row():
24
- gr.Markdown(value=description)
25
- with gr.Row():
26
- with gr.Column():
27
- code = gr.Textbox(lines=10, label="Input code")
28
- max_tokens= gr.Slider(
29
- minimum=8,
30
- maximum=1000,
31
- step=1,
32
- label="Number of tokens to generate",
33
- )
34
- temperature = gr.Slider(
35
- minimum=0.1,
36
- maximum=2.5,
37
- step=0.1,
38
- label="Temperature",
39
- )
40
- seed = gr.Slider(
41
- minimum=0,
42
- maximum=1000,
43
- step=1,
44
- label="Random seed to use for the generation"
45
- )
46
- run = gr.Button()
47
- with gr.Column():
48
- output = gr.Textbox(lines=10, label="Generated code")
49
-
50
- event = run.click(code_generation, [code, max_tokens, temperature, seed], output)
51
- gr.HTML(label="Contact", value="<img src='https://huggingface.co/datasets/bigcode/admin/resolve/main/bigcode_contact.png' alt='contact' style='display: block; margin: auto; max-width: 800px;'>")
52
-
53
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  return generated_text
19
 
20
 
21
# Stdlib import first, then third-party; the two separate
# `from transformers import ...` lines are consolidated into one.
import os

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, set_seed

title = "Santa Model Generator"
description = "Demo"
# Example rows match the Interface inputs: [prompt, max_tokens, temperature, seed]
example = [
    ["def print_hello_world():", 8, 0.6, 42],
    ["def get_file_size(filepath):", 24, 0.6, 42],
    ["def count_lines(filename):", 40, 0.6, 42],
    ["def count_words(filename):", 40, 0.6, 42]]

# Auth token for the gated checkpoint; a missing variable fails fast here
# with a KeyError rather than later inside `from_pretrained`.
token = os.environ["HUB_TOKEN"]
device = "cuda:0"  # NOTE(review): currently unused — nothing moves the pipeline to GPU; confirm intent
revision = "dedup-alt-comments"

tokenizer = AutoTokenizer.from_pretrained("bigcode/christmas-models", use_auth_token=token)
model = AutoModelForCausalLM.from_pretrained("bigcode/christmas-models", revision=revision, trust_remote_code=True, use_auth_token=token)
42
def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
    """Generate a continuation of ``gen_prompt`` with the loaded model.

    Args:
        gen_prompt: Prompt string to continue.
        max_tokens: Number of new tokens to sample (Gradio sliders may
            deliver this as a float; it is coerced to int).
        temperature: Sampling temperature; must be strictly positive.
        seed: RNG seed for reproducible sampling.

    Returns:
        The prompt concatenated with the generated continuation.
    """
    # transformers' set_seed rejects floats, and sliders hand back floats
    # even with step=1 — coerce explicitly.
    set_seed(int(seed))
    # Build the pipeline once and reuse it; constructing it per request
    # is pure loop-invariant overhead (model/tokenizer never change).
    pipe = getattr(code_generation, "_pipe", None)
    if pipe is None:
        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
        code_generation._pipe = pipe
    generated_text = pipe(
        gen_prompt,
        do_sample=True,
        top_p=0.95,
        temperature=temperature,
        max_new_tokens=int(max_tokens),
    )[0]['generated_text']
    return generated_text
47
+
48
+
49
# Use the top-level Gradio components throughout (consistent with the
# gr.Textbox input): the `gr.inputs.*` namespace and its `default=`
# keyword are deprecated in Gradio 3 — the modern spelling is `value=`.
iface = gr.Interface(
    fn=code_generation,
    inputs=[
        gr.Textbox(lines=10, label="Input code"),
        gr.Slider(
            minimum=8,
            maximum=1000,
            step=1,
            value=8,
            label="Number of tokens to generate",
        ),
        gr.Slider(
            # Keep the lower bound strictly positive: temperature=0 with
            # do_sample=True divides logits by zero at generation time.
            minimum=0.1,
            maximum=2.5,
            step=0.1,
            value=0.6,
            label="Temperature",
        ),
        gr.Slider(
            minimum=0,
            maximum=1000,
            step=1,
            value=42,
            label="Random seed to use for the generation",
        ),
    ],
    outputs=gr.Textbox(label="Predicted code", lines=10),
    examples=example,
    layout="horizontal",  # NOTE(review): Gradio-2 kwarg, ignored/deprecated in Gradio 3
    theme="peach",        # NOTE(review): Gradio-2 theme name — verify against installed version
    description=description,
    title=title,
)
iface.launch()