import os

import gradio as gr
from text_generation import Client

# StarCoder's fill-in-the-middle (FIM) special tokens. Users mark the span to
# be filled with FIM_INDICATOR; generate() rewraps the prompt with the FIM
# tokens before sending it to the model.
FIM_PREFIX = "<fim_prefix>"
FIM_MIDDLE = "<fim_middle>"
FIM_SUFFIX = "<fim_suffix>"
FIM_INDICATOR = "<FILL_HERE>"

API_URL = "https://api-inference.huggingface.co/models/bigcode/starcoder"
API_URL_BASE = "https://api-inference.huggingface.co/models/bigcode/starcoderbase"
HF_TOKEN = os.environ.get("HF_TOKEN", None)

theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_sm,
    font=[
        gr.themes.GoogleFont("Open Sans"),
        "ui-sans-serif",
        "system-ui",
        "sans-serif",
    ],
)

css = ".generating {visibility: hidden}"
monospace_css = """
#q-input textarea {
    font-family: 'Consolas', Courier, monospace;
}
"""
css += monospace_css + ".gradio-container {color: black}"

description = """

This is a demo for generating code with [StarCoder](https://huggingface.co/bigcode/starcoder).

"""

examples = [
    "X_train, y_train, X_test, y_test = train_test_split(X, y, test_size=0.1)\n\n# Train a logistic regression model, predict the labels on the test set and compute the accuracy score",
    "// Returns every other value in the array as a new array.\nfunction everyOther(arr) {",
    "def alternating(list1, list2):\n    results = []\n    for i in range(min(len(list1), len(list2))):\n        results.append(list1[i])\n        results.append(list2[i])\n    if len(list1) > len(list2):\n        <FILL_HERE>\n    else:\n        results.extend(list2[i+1:])\n    return results",
]

client = Client(
    API_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
)
client_base = Client(
    API_URL_BASE,
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
)


def generate(
    prompt,
    temperature=0.9,
    max_new_tokens=256,
    top_p=0.95,
    repetition_penalty=1.0,
    version="StarCoder",
):
    # Clamp temperature away from zero; the inference API rejects 0.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)
    fim_mode = False

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    # Fill-in-the-middle: split the prompt at the indicator and rewrap the
    # two halves with StarCoder's FIM special tokens.
    if FIM_INDICATOR in prompt:
        fim_mode = True
        try:
            prefix, suffix = prompt.split(FIM_INDICATOR)
        except ValueError:
            raise ValueError(f"Only one {FIM_INDICATOR} allowed in prompt!")
        prompt = f"{FIM_PREFIX}{prefix}{FIM_SUFFIX}{suffix}{FIM_MIDDLE}"

    if version == "StarCoder":
        stream = client.generate_stream(prompt, **generate_kwargs)
    else:
        stream = client_base.generate_stream(prompt, **generate_kwargs)

    # In FIM mode, echo only the prefix so the suffix can be appended once
    # the model has generated the middle span.
    if fim_mode:
        output = prefix
    else:
        output = prompt

    for response in stream:
        if response.token.text == "<|endoftext|>":
            if fim_mode:
                output += suffix
            else:
                return output
        else:
            output += response.token.text
        yield output
    return output


def process_example(args):
    # Exhaust the streaming generator and return the final completion.
    output = ""
    for output in generate(args):
        pass
    return output


with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
    with gr.Column():
        gr.Markdown(description)
        with gr.Row():
            with gr.Column():
                instruction = gr.Textbox(
                    placeholder="Enter your code here",
                    label="Code",
                    elem_id="q-input",
                )
                submit = gr.Button("Generate", variant="primary")
                output = gr.Code(elem_id="q-output", lines=30)
                gr.Examples(
                    examples=examples,
                    inputs=[instruction],
                    cache_examples=False,
                    fn=process_example,
                    outputs=[output],
                )
                # Wire the button to the streaming generator; the sampling
                # parameters keep their defaults since this demo only exposes
                # the prompt box.
                submit.click(generate, inputs=[instruction], outputs=[output])

demo.queue(concurrency_count=16).launch(debug=True)