rootacess committed
Commit
58332ae
1 Parent(s): 66045de

Upload 2 files

Files changed (2)
  1. app.py +165 -0
  2. requirements.txt +71 -0
app.py ADDED
@@ -0,0 +1,165 @@
+ import os
+
+ import gradio as gr
+ from text_generation import Client
+
+ # Hugging Face token used to authenticate against the hosted inference endpoint.
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
+ API_URL = "https://api-inference.huggingface.co/models/bigcode/octocoder"
+
+ theme = gr.themes.Monochrome(
+     primary_hue="indigo",
+     secondary_hue="blue",
+     neutral_hue="slate",
+     radius_size=gr.themes.sizes.radius_sm,
+     font=[
+         gr.themes.GoogleFont("Open Sans"),
+         "ui-sans-serif",
+         "system-ui",
+         "sans-serif",
+     ],
+ )
+
+ # Streaming client for the text-generation-inference server behind the API.
+ client = Client(
+     API_URL,
+     headers={"Authorization": f"Bearer {HF_TOKEN}"},
+ )
+
+ def generate(query: str, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
+     # Wrap the query in the Question/Answer template the model expects,
+     # making sure the question ends with a period.
+     if query.endswith("."):
+         prompt = f"Question: {query}\n\nAnswer:"
+     else:
+         prompt = f"Question: {query}.\n\nAnswer:"
+
+     # The endpoint rejects a temperature of 0, so clamp it to a small positive value.
+     temperature = max(float(temperature), 1e-2)
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=42,
+     )
+
+     # Stream tokens back to the UI as they arrive, stopping at the end-of-text marker.
+     stream = client.generate_stream(prompt, **generate_kwargs)
+     output = ""
+     for response in stream:
+         if response.token.text == "<|endoftext|>":
+             return output
+         output += response.token.text
+         yield output
+     return output
+
+
+ def process_example(args):
+     # Drain the generator and keep only the final, complete output.
+     output = ""
+     for output in generate(args):
+         pass
+     return output
+
+
+ css = ".generating {visibility: hidden}"
+
+ # Render the query textbox in a monospace font.
+ monospace_css = """
+ #q-input textarea {
+     font-family: 'Consolas', Courier, monospace;
+ }
+ """
+
+ description = """
+ <div style="text-align: center;">
+     <center><img src='https://raw.githubusercontent.com/bigcode-project/octopack/31f3320f098703c7910e43492c39366eeea68d83/banner.png' width='70%'/></center>
+     <br>
+     <h1><u>OctoCoder Demo</u></h1>
+ </div>
+ <br>
+ <div style="text-align: center;">
+     <p>This demo showcases the capabilities of the <a href="https://huggingface.co/bigcode/octocoder">OctoCoder</a> model: it generates code by following the instructions provided in the input.</p>
+     <p><strong>OctoCoder</strong> is an instruction-tuned model with 15.5B parameters, created by fine-tuning StarCoder on CommitPackFT & OASST.</p>
+ </div>
+ """
+ disclaimer = """⚠️ <b>Any use or sharing of this demo constitutes your acceptance of the BigCode [OpenRAIL-M](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) License Agreement and the use restrictions included within.</b>\
+ <br>**Intended Use**: this app and its [supporting model](https://huggingface.co/bigcode) are provided for demonstration purposes only, not as a replacement for human expertise. For more details on the model's limitations with respect to factuality and biases, see the [model card](https://huggingface.co/bigcode)."""
+
+
+ examples = [["Please write a function in Python that performs bubble sort."]]
+
+
+ with gr.Blocks(theme=theme, analytics_enabled=False, css=css + monospace_css) as demo:
+     with gr.Column():
+         gr.Markdown(description)
+         with gr.Row():
+             with gr.Column():
+                 with gr.Accordion("Settings", open=True):
+                     with gr.Row():
+                         column_1, column_2 = gr.Column(), gr.Column()
+                         with column_1:
+                             temperature = gr.Slider(
+                                 label="Temperature",
+                                 value=0.2,
+                                 minimum=0.0,
+                                 maximum=1.0,
+                                 step=0.05,
+                                 interactive=True,
+                                 info="Higher values produce more diverse outputs",
+                             )
+                             max_new_tokens = gr.Slider(
+                                 label="Max new tokens",
+                                 value=256,
+                                 minimum=0,
+                                 maximum=8192,
+                                 step=64,
+                                 interactive=True,
+                                 info="The maximum number of new tokens",
+                             )
+                         with column_2:
+                             top_p = gr.Slider(
+                                 label="Top-p (nucleus sampling)",
+                                 value=0.90,
+                                 minimum=0.0,
+                                 maximum=1.0,
+                                 step=0.05,
+                                 interactive=True,
+                                 info="Higher values sample more low-probability tokens",
+                             )
+                             repetition_penalty = gr.Slider(
+                                 label="Repetition penalty",
+                                 value=1.2,
+                                 minimum=1.0,
+                                 maximum=2.0,
+                                 step=0.05,
+                                 interactive=True,
+                                 info="Penalize repeated tokens",
+                             )
+
+         with gr.Row():
+             with gr.Column():
+                 instruction = gr.Textbox(
+                     placeholder="Enter your query here",
+                     lines=5,
+                     label="Input",
+                     elem_id="q-input",
+                 )
+                 submit = gr.Button("Generate", variant="primary")
+                 output = gr.Code(elem_id="q-output", lines=30, label="Output")
+                 gr.Markdown(disclaimer)
+                 gr.Examples(
+                     examples=examples,
+                     inputs=[instruction],
+                     cache_examples=False,
+                     fn=process_example,
+                     outputs=[output],
+                 )
+
+     submit.click(
+         generate,
+         inputs=[instruction, temperature, max_new_tokens, top_p, repetition_penalty],
+         outputs=[output],
+     )
+ demo.queue(concurrency_count=16).launch(debug=True)
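For reference, the text_generation client that app.py builds on can also be exercised without the Gradio UI. The following is a minimal sketch, not part of the commit, assuming the pinned text-generation==0.6.0 from requirements.txt below, a valid HF_TOKEN in the environment, and sampling parameters chosen to mirror the UI defaults:

import os

from text_generation import Client

# Same endpoint and auth header that app.py configures above.
client = Client(
    "https://api-inference.huggingface.co/models/bigcode/octocoder",
    headers={"Authorization": f"Bearer {os.environ['HF_TOKEN']}"},
)

# Same Question/Answer template that generate() builds in app.py.
prompt = "Question: Please write a function in Python that performs bubble sort.\n\nAnswer:"

# Blocking call: returns the whole completion at once instead of streaming token by token.
response = client.generate(prompt, max_new_tokens=256, temperature=0.2, top_p=0.95, do_sample=True)
print(response.generated_text)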
requirements.txt ADDED
@@ -0,0 +1,71 @@
+ aiofiles==23.2.1
+ aiohttp==3.8.5
+ aiosignal==1.3.1
+ altair==5.0.1
+ annotated-types==0.5.0
+ anyio==3.7.1
+ async-timeout==4.0.2
+ attrs==23.1.0
+ certifi==2023.7.22
+ charset-normalizer==3.2.0
+ click==8.1.6
+ contourpy==1.1.0
+ cycler==0.11.0
+ exceptiongroup==1.1.2
+ fastapi==0.101.0
+ ffmpy==0.3.1
+ filelock==3.12.2
+ fonttools==4.42.0
+ frozenlist==1.4.0
+ fsspec==2023.6.0
+ gradio==3.39.0
+ gradio_client==0.3.0
+ h11==0.14.0
+ httpcore==0.17.3
+ httpx==0.24.1
+ huggingface-hub==0.16.4
+ idna==3.4
+ importlib-resources==6.0.1
+ Jinja2==3.1.2
+ jsonschema==4.19.0
+ jsonschema-specifications==2023.7.1
+ kiwisolver==1.4.4
+ linkify-it-py==2.0.2
+ markdown-it-py==2.2.0
+ MarkupSafe==2.1.3
+ matplotlib==3.7.2
+ mdit-py-plugins==0.3.3
+ mdurl==0.1.2
+ multidict==6.0.4
+ numpy==1.24.4
+ orjson==3.9.4
+ packaging==23.1
+ pandas==2.0.3
+ Pillow==10.0.0
+ pkgutil_resolve_name==1.3.10
+ pydantic==1.10.12
+ pydantic_core==2.4.0
+ pydub==0.25.1
+ pyparsing==3.0.9
+ python-dateutil==2.8.2
+ python-multipart==0.0.6
+ pytz==2023.3
+ PyYAML==6.0.1
+ referencing==0.30.2
+ requests==2.31.0
+ rpds-py==0.9.2
+ semantic-version==2.10.0
+ six==1.16.0
+ sniffio==1.3.0
+ starlette==0.27.0
+ text-generation==0.6.0
+ toolz==0.12.0
+ tqdm==4.66.0
+ typing_extensions==4.7.1
+ tzdata==2023.3
+ uc-micro-py==1.0.2
+ urllib3==2.0.4
+ uvicorn==0.23.2
+ websockets==11.0.3
+ yarl==1.9.2
+ zipp==3.16.2
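With both files in place, the usual Spaces workflow should be enough to reproduce the demo locally: install the pinned dependencies with `pip install -r requirements.txt`, export a Hugging Face access token as `HF_TOKEN`, and start the app with `python app.py`.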