lvwerra HF staff loubnabnl HF staff committed on
Commit
7a6f200
1 Parent(s): 4d1f3e5

Update space format (#1)

Browse files

- Update space format (8078e2c542da0aae5abbe29bb6c0593e8d943df4)


Co-authored-by: loubna ben allal <loubnabnl@users.noreply.huggingface.co>

Files changed (1) hide show
  1. app.py +36 -40
app.py CHANGED
@@ -3,13 +3,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
3
  from transformers import pipeline
4
  import os
5
 
6
# UI metadata for the Gradio demo: page title, short description, and the
# example rows fed to the Interface. Each example row matches the input
# widgets in order: (prompt, number of tokens, temperature, seed).
title = "Santa Model Generator"
description = "Demo"
example = [
    ["def print_hello_world():", 8, 0.6, 42],
    ["def get_file_size(filepath):", 24, 0.6, 42],
    ["def count_lines(filename):", 40, 0.6, 42],
    ["def count_words(filename):", 40, 0.6, 42],
]
13
 
14
# Authentication token for the (presumably gated) model repo.
# NOTE(review): raises KeyError if HUB_TOKEN is not set — appears intentional
# so the Space fails fast when the secret is missing; confirm.
token = os.environ["HUB_TOKEN"]
# assumes a CUDA GPU is available at runtime — TODO confirm deployment hardware
device="cuda:0"
@@ -26,37 +21,38 @@ def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
26
  return generated_text
27
 
28
 
29
# Build and launch the code-generation demo UI.
# Inputs (in order, matching `example` rows): prompt text, token budget,
# sampling temperature, and RNG seed; output is the generated code.
generation_inputs = [
    gr.Textbox(lines=10, label="Input code"),
    gr.inputs.Slider(
        minimum=8,
        maximum=1000,
        step=1,
        default=8,
        label="Number of tokens to generate",
    ),
    gr.inputs.Slider(
        minimum=0,
        maximum=2.5,
        step=0.1,
        default=0.6,
        label="Temperature",
    ),
    gr.inputs.Slider(
        minimum=0,
        maximum=1000,
        step=1,
        default=42,
        label="Random seed to use for the generation",
    ),
]

iface = gr.Interface(
    fn=code_generation,
    inputs=generation_inputs,
    outputs=gr.Textbox(label="Predicted code", lines=10),
    examples=example,
    layout="horizontal",
    theme="peach",
    description=description,
    title=title,
)
iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  from transformers import pipeline
4
  import os
5
 
6
# Markdown/HTML banner rendered at the top of the Space: a centered title
# followed by a one-line blurb. Inline styles match the dark page theme.
# NOTE(review): the model link href is still the placeholder "todo".
description = """# <p style="text-align: center; color: white;"> ❄️ <span style='color: #ff75b3;'>IceCoder</span> Generator ❄️ </p>
<span style='color: white;'>This is a demo to generate code with <a href="todo" style="color: #ff75b3;">IceCoder</a>, a 1.1B model for code generation in Python, Java & JavaScript.</span>"""
 
 
 
 
 
8
 
9
  token = os.environ["HUB_TOKEN"]
10
  device="cuda:0"
 
21
  return generated_text
22
 
23
 
24
# Build the demo with the lower-level Blocks API (replaces the old Interface):
# a banner row, then a two-column row with inputs on the left and the
# generated code on the right. Custom CSS gives the dark background.
demo = gr.Blocks(
    css=".gradio-container {background-color: #20233fff; color:white}"
)
with demo:
    with gr.Row():
        gr.Markdown(value=description)
    with gr.Row():
        with gr.Column():
            code = gr.Textbox(lines=10, label="Input code")
            max_tokens = gr.Slider(
                minimum=8,
                maximum=1000,
                step=1,
                label="Number of tokens to generate",
            )
            temperature = gr.Slider(
                minimum=0.1,
                maximum=2.5,
                step=0.1,
                label="Temperature",
            )
            seed = gr.Slider(
                minimum=0,
                maximum=1000,
                step=1,
                label="Random seed to use for the generation",
            )
            run = gr.Button()
        with gr.Column():
            output = gr.Textbox(lines=10, label="Generated code")

    # Wire the button to the generator; argument order matches
    # code_generation(gen_prompt, max_tokens, temperature, seed).
    # NOTE(review): source is a whitespace-mangled diff — these two statements
    # are placed directly inside `with demo:` (not inside a Row/Column);
    # confirm against the original layout.
    event = run.click(code_generation, [code, max_tokens, temperature, seed], output)
    gr.HTML(label="Contact", value="<img src='https://huggingface.co/datasets/bigcode/admin/resolve/main/bigcode_contact.png' alt='contact' style='display: block; margin: auto; max-width: 800px;'>")

demo.launch()