stillerman committed
Commit d17f973 (0 parents)

Duplicate from stillerman/santacoder-ruby-demo
Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +134 -0
  4. requirements.txt +2 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: SantaCoder-Ruby Demo
+ emoji: 🎅
+ colorFrom: blue
+ colorTo: red
+ sdk: gradio
+ sdk_version: 3.13.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: stillerman/santacoder-ruby-demo
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,134 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
+ from transformers import pipeline
+ import os
+ import torch
+
+ description = """# <p style="text-align: center; color: white;"> 🎅 <span style='color: #ff75b3;'>SantaCoder-Ruby:</span> Code Generation </p>
+ <span style='color: white;'>This is a demo for generating code with <a href="https://huggingface.co/stillerman/santacoder-ruby" style="color: #ff75b3;">SantaCoder-Ruby</a>, a fine-tuned version of <a href="https://huggingface.co/bigcode/santacoder" style="color: #ff75b3;">SantaCoder</a>,
+ a 1.1B-parameter model for code generation in Python, Java & JavaScript. The model can also do infilling: just mark where you would like the model to complete code
+ with the <span style='color: #ff75b3;'>&lt;FILL-HERE&gt;</span> token.</span>"""
+
+ token = os.environ["HUB_TOKEN"]
+ device = "cpu"
+
+
+ FIM_PREFIX = "<fim-prefix>"
+ FIM_MIDDLE = "<fim-middle>"
+ FIM_SUFFIX = "<fim-suffix>"
+ FIM_PAD = "<fim-pad>"
+ EOD = "<|endoftext|>"
+
+ GENERATION_TITLE = "<p style='font-size: 16px; color: white;'>Generated code:</p>"
+
+ tokenizer_fim = AutoTokenizer.from_pretrained("bigcode/santacoder", use_auth_token=token, padding_side="left")
+
+ tokenizer_fim.add_special_tokens({
+     "additional_special_tokens": [EOD, FIM_PREFIX, FIM_MIDDLE, FIM_SUFFIX, FIM_PAD],
+     "pad_token": EOD,
+ })
+
+ tokenizer = AutoTokenizer.from_pretrained("bigcode/christmas-models", use_auth_token=token)
+ model = AutoModelForCausalLM.from_pretrained("stillerman/santacoder-ruby", trust_remote_code=True, use_auth_token=token).to(device)
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
+
+ def post_processing(prompt, completion):
+     completion = "<span style='color: #ff75b3;'>" + completion + "</span>"
+     prompt = "<span style='color: #727cd6;'>" + prompt + "</span>"
+     code_html = f"<br><hr><br><pre style='font-size: 12px'><code>{prompt}{completion}</code></pre><br><hr>"
+     return GENERATION_TITLE + code_html
+
+ def post_processing_fim(prefix, middle, suffix):
+     prefix = "<span style='color: #727cd6;'>" + prefix + "</span>"
+     middle = "<span style='color: #ff75b3;'>" + middle + "</span>"
+     suffix = "<span style='color: #727cd6;'>" + suffix + "</span>"
+     code_html = f"<br><hr><br><pre style='font-size: 12px'><code>{prefix}{middle}{suffix}</code></pre><br><hr>"
+     return GENERATION_TITLE + code_html
+
+ def fim_generation(prompt, max_new_tokens, temperature):
+     # Split on the first <FILL-HERE> marker only; any later text stays in the suffix.
+     prefix, suffix = prompt.split("<FILL-HERE>", 1)
+     [middle] = infill((prefix, suffix), max_new_tokens, temperature)
+     return post_processing_fim(prefix, middle, suffix)
+
+ def extract_fim_part(s: str):
+     # Return the text between the FIM middle token and the next EOD token.
+     start = s.find(FIM_MIDDLE) + len(FIM_MIDDLE)
+     stop = s.find(EOD, start)
+     if stop == -1:  # str.find returns -1 (truthy) when EOD is absent, so `or len(s)` would not work here
+         stop = len(s)
+     return s[start:stop]
+
+ def infill(prefix_suffix_tuples, max_new_tokens, temperature):
+     if isinstance(prefix_suffix_tuples, tuple):
+         prefix_suffix_tuples = [prefix_suffix_tuples]
+
+     prompts = [f"{FIM_PREFIX}{prefix}{FIM_SUFFIX}{suffix}{FIM_MIDDLE}" for prefix, suffix in prefix_suffix_tuples]
+     # `return_token_type_ids=False` is essential, or we get nonsense output.
+     inputs = tokenizer_fim(prompts, return_tensors="pt", padding=True, return_token_type_ids=False).to(device)
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             do_sample=True,
+             temperature=temperature,
+             max_new_tokens=max_new_tokens,
+             pad_token_id=tokenizer_fim.pad_token_id,  # pad with the same tokenizer the inputs were padded with
+         )
+     # WARNING: cannot use skip_special_tokens, because it blows away the FIM special tokens.
+     return [
+         extract_fim_part(tokenizer_fim.decode(tensor, skip_special_tokens=False)) for tensor in outputs
+     ]
+
+
+ def code_generation(prompt, max_new_tokens, temperature=0.2, seed=42):
+     set_seed(seed)  # apply the seed exposed in the UI so sampling is reproducible
+     if "<FILL-HERE>" in prompt:
+         return fim_generation(prompt, max_new_tokens, temperature=temperature)
+     else:
+         completion = pipe(prompt, do_sample=True, top_p=0.95, temperature=temperature, max_new_tokens=max_new_tokens)[0]['generated_text']
+         completion = completion[len(prompt):]
+         return post_processing(prompt, completion)
+
+
+ demo = gr.Blocks(
+     css=".gradio-container {background-color: #20233fff; color: white}"
+ )
+ with demo:
+     with gr.Row():
+         _, column_2, _ = gr.Column(scale=1), gr.Column(scale=6), gr.Column(scale=1)
+         with column_2:
+             gr.Markdown(value=description)
+             code = gr.Textbox(lines=5, label="Input code", value='''def fib(n)
+   if n <= 1
+     n
+   else
+     <FILL-HERE>''')
+
+             with gr.Accordion("Advanced settings", open=False):
+                 max_new_tokens = gr.Slider(
+                     minimum=8,
+                     maximum=1024,
+                     step=1,
+                     value=80,
+                     label="Number of tokens to generate",
+                 )
+                 temperature = gr.Slider(
+                     minimum=0.1,
+                     maximum=2.5,
+                     step=0.1,
+                     value=0.2,
+                     label="Temperature",
+                 )
+                 seed = gr.Slider(
+                     minimum=0,
+                     maximum=1000,
+                     step=1,
+                     label="Random seed to use for the generation",
+                 )
+             run = gr.Button()
+             output = gr.HTML(label="Generated code")
+
+     run.click(code_generation, [code, max_new_tokens, temperature, seed], output, api_name="predict")
+     gr.HTML(label="Contact", value="<img src='https://huggingface.co/datasets/bigcode/admin/resolve/main/bigcode_contact.png' alt='contact' style='display: block; margin: auto; max-width: 800px;'>")
+
+ # demo.launch(share=True)
+ demo.launch()
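
For readers unfamiliar with fill-in-the-middle (FIM) prompting, here is a minimal sketch of the round trip the `infill`/`extract_fim_part` pair in app.py performs. It uses only string operations (no model or tokenizer), and the completion shown is hypothetical, purely for illustration:

# FIM special tokens, as defined in app.py above.
FIM_PREFIX, FIM_MIDDLE, FIM_SUFFIX = "<fim-prefix>", "<fim-middle>", "<fim-suffix>"
EOD = "<|endoftext|>"

# Split the user's code at <FILL-HERE> into a prefix and a suffix.
prefix = "def fib(n)\n  if n <= 1\n    n\n  else\n    "
suffix = "\n  end\nend"

# The FIM prompt places the suffix *before* the <fim-middle> marker,
# so the model generates the missing middle at the end of the sequence.
prompt = f"{FIM_PREFIX}{prefix}{FIM_SUFFIX}{suffix}{FIM_MIDDLE}"

# A decoded model output looks like the prompt followed by the generated
# middle and an end-of-document token (hypothetical completion):
decoded = prompt + "fib(n - 1) + fib(n - 2)" + EOD

# Recover the middle exactly as extract_fim_part does.
start = decoded.find(FIM_MIDDLE) + len(FIM_MIDDLE)
stop = decoded.find(EOD, start)
middle = decoded[start:stop if stop != -1 else len(decoded)]
print(middle)  # => fib(n - 1) + fib(n - 2)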
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ torch
+ transformers