Daniel Fried committed on
Commit 2e513e8
1 Parent(s): bac999f

remove old app.py and update README

Files changed (2):
  1. README.md +8 -8
  2. app.py +0 -243
README.md CHANGED
@@ -1,13 +1,13 @@
  ---
- title: BigCode - Playground
- emoji: 🛝
- colorFrom: black
- colorTo: black
+ title: BigCode - Editor
+ emoji: 💻
+ colorFrom: red
+ colorTo: purple
  sdk: gradio
- sdk_version: 3.23.0
- app_file: app.py
+ sdk_version: 2.9.1
+ python_version: 3.8.13
+ app_file: start.py
  pinned: false
- duplicated_from: bigcode/bigcode-playground
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py DELETED
@@ -1,243 +0,0 @@
- import json
- import os
- import shutil
- import requests
-
- import gradio as gr
- from huggingface_hub import Repository
- from text_generation import Client
-
- from share_btn import community_icon_html, loading_icon_html, share_js, share_btn_css
-
- HF_TOKEN = os.environ.get("HF_TOKEN", None)
- API_URL = os.environ.get("API_URL")
-
- with open("./HHH_prompt.txt", "r") as f:
-     HHH_PROMPT = f.read() + "\n\n"
-
- FIM_PREFIX = "<fim_prefix>"
- FIM_MIDDLE = "<fim_middle>"
- FIM_SUFFIX = "<fim_suffix>"
-
- END_OF_TEXT = "<|endoftext|>"
-
- FIM_INDICATOR = "<FILL_HERE>"
-
- FORMATS = """## Model formats
-
- The model is pretrained on code and in addition to the pure code data it is formatted with special tokens. E.g. prefixes specifying the source of the file or special tokens separating code from a commit message. See below:
-
- ### Chat mode
- Chat mode prepends the [HHH prompt](https://gist.github.com/jareddk/2509330f8ef3d787fc5aaac67aab5f11#file-hhh_prompt-txt) from Anthropic to the request which conditions the model to be an assistant.
-
- ### Prefixes
- Any combination of the three following prefixes can be found in pure code files:
-
- ```
- <reponame>REPONAME<filename>FILENAME<gh_stars>STARS\ncode<|endoftext|>
- ```
- STARS can be one of: 0, 1-10, 10-100, 100-1000, 1000+
-
- ### Commits
- The commits data is formatted as follows:
- ```
- <commit_before>code<commit_msg>text<commit_after>code<|endoftext|>
- ```
-
- ### Jupyter structure
- Jupyter notebooks were trained both in the form of Python scripts and in the following structured format:
- ```
- <start_jupyter><jupyter_text>text<jupyter_code>code<jupyter_output>output<jupyter_text>
- ```
-
- ### Issues
- We also trained on GitHub issues using the following formatting:
- ```
- <issue_start><issue_comment>text<issue_comment>...<issue_closed>
- ```
-
- ### Fill-in-the-middle
- Fill in the middle requires rearranging the model inputs. The playground does this for you - all you need is to specify where to fill:
- ```
- code before<FILL_HERE>code after
- ```
- """
-
- theme = gr.themes.Monochrome(
-     primary_hue="indigo",
-     secondary_hue="blue",
-     neutral_hue="slate",
-     radius_size=gr.themes.sizes.radius_sm,
-     font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
- )
-
- client = Client(
-     API_URL, headers={"Authorization": f"Bearer {HF_TOKEN}"},
- )
-
- def generate(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0, chat_mode=False):
-
-     temperature = float(temperature)
-     if temperature < 1e-2:
-         temperature = 1e-2
-     top_p = float(top_p)
-     fim_mode = False
-
-     generate_kwargs = dict(
-         temperature=temperature,
-         max_new_tokens=max_new_tokens,
-         top_p=top_p,
-         repetition_penalty=repetition_penalty,
-         do_sample=True,
-         seed=42,
-     )
-     if chat_mode:
-         generate_kwargs.update({"stop_sequences": ["\nHuman", "\n-----"]})
-
-     if chat_mode and FIM_INDICATOR in prompt:
-         raise ValueError("Chat mode and FIM are mutually exclusive. Choose one or the other.")
-
-     if chat_mode:
-         chat_prompt = "Human: " + prompt + "\n\nAssistant:"
-         prompt = HHH_PROMPT + chat_prompt
-
-     if FIM_INDICATOR in prompt:
-         fim_mode = True
-         try:
-             prefix, suffix = prompt.split(FIM_INDICATOR)
-             print("----prefix----")
-             print(prefix)
-             print("----suffix----")
-             print(suffix)
-         except ValueError:
-             raise ValueError(f"Only one {FIM_INDICATOR} allowed in prompt!")
-         # prompt = f"{FIM_PREFIX}{prefix}{FIM_SUFFIX}{suffix}{FIM_MIDDLE}"
-         prompt = f"{FIM_PREFIX}{prefix}{FIM_SUFFIX}{suffix}{FIM_MIDDLE}"
-         print("----prompt----")
-         print(prompt)
-
-     stream = client.generate_stream(prompt, **generate_kwargs)
-
-     if fim_mode:
-         output = prefix
-     elif chat_mode:
-         output = chat_prompt
-     else:
-         output = prompt
-
-     print("----stream----")
-     previous_token = ""
-     for response in stream:
-         if response.token.text == END_OF_TEXT:
-             if fim_mode:
-                 print(suffix)
-                 output += suffix
-                 yield output
-             break
-         else:
-             if chat_mode and response.token.text in ["Human", "-----"] and previous_token=="\n":
-                 return output
-             else:
-                 print(response.token.text)
-                 output += response.token.text
-             previous_token = response.token.text
-             yield output
-     print("full output:")
-     print(output)
-     return output
-
-
- examples = [
-     "def print_hello_world():",
-     'def fibonacci(n: int) -> int:\n """ Compute the n-th Fibonacci number. """',
-     'from typing import List, Tuple\n\ndef sum_and_product(numbers: List[int]) -> Tuple[int, int]:\n """ Return the sum and the product of the integers in the list as a tuple. Here is the answer of the exercise"""',
-     "class ComplexNumbers:"
- ]
-
-
- def process_example(args):
-     for x in generate(args):
-         pass
-     return x
-
- css = ".generating {visibility: hidden}" + share_btn_css
-
- with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
-     with gr.Column():
-         gr.Markdown(
-             """\
- # BigCode - Playground
-
- _Note:_ this is an internal playground - please do not share. The deployment can also change and thus the space may not work as we continue development.\
- """
-
-         )
-         with gr.Row():
-             with gr.Column(scale=3):
-                 instruction = gr.Textbox(placeholder="Enter your prompt here", label="Prompt", elem_id="q-input")
-                 submit = gr.Button("Generate", variant="primary")
-                 edit = gr.Button("Edit")
-                 output = gr.Code(elem_id="q-output", interactive=True, language="python")
-
-                 with gr.Group(elem_id="share-btn-container"):
-                     community_icon = gr.HTML(community_icon_html, visible=True)
-                     loading_icon = gr.HTML(loading_icon_html, visible=True)
-                     share_button = gr.Button("Share to community", elem_id="share-btn", visible=True)
-
-                 gr.Examples(
-                     examples=examples,
-                     inputs=[instruction],
-                     cache_examples=False,
-                     fn=process_example,
-                     outputs=[output],
-                 )
-                 gr.Markdown(FORMATS)
-
-             with gr.Column(scale=1):
-                 chat_mode = gr.Checkbox(
-                     value=False,
-                     label="Chat mode",
-                     info="Uses Anthropic's HHH prompt to turn the model into an assistant."
-                 )
-                 temperature = gr.Slider(
-                     label="Temperature",
-                     value=0.2,
-                     minimum=0.0,
-                     maximum=2.0,
-                     step=0.1,
-                     interactive=True,
-                     info="Higher values produce more diverse outputs",
-                 )
-                 max_new_tokens = gr.Slider(
-                     label="Max new tokens",
-                     value=256,
-                     minimum=0,
-                     maximum=8192,
-                     step=64,
-                     interactive=True,
-                     info="The maximum number of new tokens",
-                 )
-                 top_p = gr.Slider(
-                     label="Top-p (nucleus sampling)",
-                     value=0.90,
-                     minimum=0.0,
-                     maximum=1,
-                     step=0.05,
-                     interactive=True,
-                     info="Higher values sample more low-probability tokens",
-                 )
-                 repetition_penalty = gr.Slider(
-                     label="Repetition penalty",
-                     value=1.2,
-                     minimum=1.0,
-                     maximum=2.0,
-                     step=0.05,
-                     interactive=True,
-                     info="Penalize repeated tokens",
-                 )
-
-     submit.click(generate, inputs=[instruction, temperature, max_new_tokens, top_p, repetition_penalty, chat_mode], outputs=[output])
-     edit.click(generate, inputs=[output, temperature, max_new_tokens, top_p, repetition_penalty, chat_mode], outputs=[output])
-     # instruction.submit(generate, inputs=[instruction, temperature, max_new_tokens, top_p, repetition_penalty, chat_mode], outputs=[output])
-     share_button.click(None, [], [], _js=share_js)
- demo.queue(concurrency_count=16).launch(debug=True)
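For reference, the fill-in-the-middle rearrangement that the deleted `generate()` performed reduces to a few lines. Below is a minimal sketch using the special tokens defined in the deleted file; `build_fim_prompt` is a hypothetical helper name, not part of the original app:

```python
FIM_PREFIX = "<fim_prefix>"
FIM_MIDDLE = "<fim_middle>"
FIM_SUFFIX = "<fim_suffix>"
FIM_INDICATOR = "<FILL_HERE>"

def build_fim_prompt(prompt: str) -> str:
    """Rearrange 'code before<FILL_HERE>code after' into prefix-suffix-middle order."""
    try:
        prefix, suffix = prompt.split(FIM_INDICATOR)
    except ValueError:
        # unpacking into two parts fails unless the indicator occurs exactly once
        raise ValueError(f"Only one {FIM_INDICATOR} allowed in prompt!")
    # The model is shown the prefix and the suffix, then generates the middle.
    return f"{FIM_PREFIX}{prefix}{FIM_SUFFIX}{suffix}{FIM_MIDDLE}"

build_fim_prompt("def add(a, b):\n    <FILL_HERE>\nprint(add(1, 2))")
# -> '<fim_prefix>def add(a, b):\n    <fim_suffix>\nprint(add(1, 2))<fim_middle>'
```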
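Chat mode, likewise, amounted to prepending Anthropic's HHH prompt and wrapping the user turn in Human/Assistant markers, with "\nHuman" as a stop sequence so the model does not write the next human turn itself. A minimal sketch, assuming the HHH_prompt.txt that shipped with the old app is available; `assemble_chat_prompt` is a hypothetical name:

```python
with open("HHH_prompt.txt") as f:  # the prompt file from the old repo
    HHH_PROMPT = f.read() + "\n\n"

def assemble_chat_prompt(user_message: str):
    """Condition the code model to act as an assistant, as the old chat mode did."""
    chat_prompt = "Human: " + user_message + "\n\nAssistant:"
    # Cut generation off before the model starts writing the next human turn.
    stop_sequences = ["\nHuman", "\n-----"]
    return HHH_PROMPT + chat_prompt, stop_sequences
```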
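The streaming loop itself reduces to iterating over `client.generate_stream()` and accumulating token texts until `<|endoftext|>`. A minimal sketch against a text-generation-inference endpoint, assuming `API_URL` and `HF_TOKEN` are set in the environment as in the deleted file:

```python
import os
from text_generation import Client  # the same client the old app used

client = Client(
    os.environ["API_URL"],
    headers={"Authorization": f"Bearer {os.environ['HF_TOKEN']}"},
)

output = "def print_hello_world():"
for response in client.generate_stream(
    output, max_new_tokens=64, temperature=0.2, top_p=0.95, do_sample=True, seed=42
):
    if response.token.text == "<|endoftext|>":
        break  # the model signalled end of generation
    output += response.token.text
print(output)
```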