import json
import os
import shutil

import requests
import gradio as gr
from huggingface_hub import Repository
from text_generation import Client

from share_btn import community_icon_html, loading_icon_html, share_js, share_btn_css

# Hugging Face API token for the hosted inference endpoint (may be None for
# anonymous access to public models).
HF_TOKEN = os.environ.get("HF_TOKEN", None)
API_URL = "https://api-inference.huggingface.co/models/codellama/CodeLlama-13b-hf"

# Project CodeNet language-classification dataset archive (referenced in the
# demo description; not downloaded in this file).
file_name = "Project_CodeNet_LangClass.tar.gz"
data_url = f"https://dax-cdn.cdn.appdomain.cloud/dax-project-codenet/1.0.0/{file_name}"

# Fill-in-the-middle (FIM) sentinel tokens.
# NOTE(review): the original literals had their angle-bracket tokens stripped
# (leaving empty/whitespace strings, which breaks `prompt.split(FIM_INDICATOR)`);
# restored per the CodeLlama infilling convention — confirm against the model's
# tokenizer special tokens.
FIM_PREFIX = "<PRE> "
FIM_MIDDLE = " <MID>"
FIM_SUFFIX = " <SUF>"

# Marker users place in a prompt to request fill-in-the-middle generation.
FIM_INDICATOR = "<FILL_ME>"

# Stop markers: end-of-sequence / end-of-text strings emitted by the model.
EOS_STRING = "</s>"
EOT_STRING = "<EOT>"

# Monochrome Gradio theme with indigo/blue accents, slate neutrals, small
# corner radii, and Open Sans with generic sans-serif fallbacks.
theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_sm,
    font=[
        gr.themes.GoogleFont("Open Sans"),
        "ui-sans-serif",
        "system-ui",
        "sans-serif",
    ],
)

# Streaming text-generation client for the hosted inference endpoint.
# Only attach the Authorization header when a token is actually configured;
# the original always sent it, producing a literal "Bearer None" header when
# HF_TOKEN was unset.
client = Client(
    API_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {},
)


def generate(
    prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    """Stream model output for ``prompt``, yielding the accumulated text.

    Supports fill-in-the-middle (FIM): if ``FIM_INDICATOR`` occurs exactly once
    in the prompt, the text before/after it is wrapped with the FIM sentinel
    tokens and the generated infill is streamed between prefix and suffix.

    Args:
        prompt: User prompt, optionally containing one ``FIM_INDICATOR``.
        temperature: Sampling temperature; clamped to a minimum of 1e-2.
        max_new_tokens: Maximum number of tokens to generate.
        top_p: Nucleus-sampling probability mass.
        repetition_penalty: Penalty applied to repeated tokens.

    Yields:
        The full output-so-far after each streamed token.

    Raises:
        ValueError: If the prompt contains more than one ``FIM_INDICATOR``.
    """
    # Clamp temperature away from 0 to keep sampling numerically valid.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)
    fim_mode = False

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    if FIM_INDICATOR in prompt:
        fim_mode = True
        try:
            prefix, suffix = prompt.split(FIM_INDICATOR)
        except ValueError as exc:
            # More than one indicator makes the split produce != 2 parts.
            raise ValueError(f"Only one {FIM_INDICATOR} allowed in prompt!") from exc
        prompt = f"{FIM_PREFIX}{prefix}{FIM_SUFFIX}{suffix}{FIM_MIDDLE}"

    stream = client.generate_stream(prompt, **generate_kwargs)

    # In FIM mode the model produces only the middle, so start from the user's
    # prefix; otherwise echo the prompt and append generated tokens to it.
    output = prefix if fim_mode else prompt

    for response in stream:
        token_text = response.token.text
        # Stop as soon as the model emits an end-of-sequence/end-of-text marker.
        if any(end_token in token_text for end_token in (EOS_STRING, EOT_STRING)):
            if fim_mode:
                # Close the infill by re-attaching the user's suffix.
                output += suffix
                yield output
            return output
        output += token_text
        yield output
    return output


# Example prompts shown in the UI (gr.Examples); each is a code-completion
# seed spanning several languages.
examples = [
    "# Use the Project CodeNet model to convert this Java code into Python.\n// Java code to reverse a string\npublic String reverseString(String input) {",
    "# Write a function in C++ to compute the factorial of a number using recursion.\nint factorial(int n) {",
    "# Python function to check if a given number is prime. Complete the function.\ndef is_prime(num):\n    for i in range(2, num):",
    "# JavaScript code to dynamically change the color of a webpage background based on user input.\ndocument.body.style.backgroundColor = ",
    "# Given a list of employee names and their salaries, write a Python code to sort the employees by their salary in descending order.\nemployees = [('John Doe', 120000), ('Jane Smith', 110000), ",
    "# Write a Ruby method to count the number of vowels in a given string.\ndef count_vowels(string)\n    vowels_count = 0\n",
]



def process_example(args):
    """Run ``generate`` to completion for an example prompt.

    Drains the streaming generator and returns only the final accumulated
    output (each yield is the output-so-far). Returns None if the generator
    yields nothing — the original re-used the loop variable, which raised
    UnboundLocalError in that case.
    """
    output = None
    for output in generate(args):
        pass
    return output


# Hide elements carrying the "generating" class while the model streams.
css = ".generating {visibility: hidden}"

# Monospace font for the prompt textbox (element id "q-input").
monospace_css = """
#q-input textarea {
    font-family: monospace, 'Consolas', Courier, monospace;
}
"""


# Combine app CSS with the share-button styles and force black text in the app.
css += share_btn_css + monospace_css + ".gradio-container {color: black}"

description = """

🦙 Project Codenet Playground

We have leveraged the expansive dataset from Project CodeNet to enhance our model, CodeLlama. The Project CodeNet dataset is an extensive collection specifically curated to spur algorithmic advancements in AI for coding tasks. It supports a wide range of applications including code translation, code similarity assessment, code classification, and code search functionalities.

"""

# NOTE(review): this UI section was collapsed onto a single source line; the
# nesting below is reconstructed from the `with` ordering of the original
# statements — confirm the layout matches the intended design.
with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
    with gr.Column():
        gr.Markdown(description)
        with gr.Row():
            with gr.Column():
                # Prompt input box (monospace via #q-input CSS above).
                instruction = gr.Textbox(
                    placeholder="Enter your code here",
                    lines=5,
                    label="Input",
                    elem_id="q-input",
                )
                submit = gr.Button("Generate", variant="primary")
                output = gr.Code(elem_id="q-output", lines=30, label="Output")
                with gr.Row():
                    with gr.Column():
                        with gr.Accordion("Advanced settings", open=False):
                            with gr.Row():
                                column_1, column_2 = gr.Column(), gr.Column()
                                with column_1:
                                    temperature = gr.Slider(
                                        label="Temperature",
                                        value=0.1,
                                        minimum=0.0,
                                        maximum=1.0,
                                        step=0.05,
                                        interactive=True,
                                        info="Higher values produce more diverse outputs",
                                    )
                                    max_new_tokens = gr.Slider(
                                        label="Max new tokens",
                                        value=256,
                                        minimum=0,
                                        maximum=8192,
                                        step=64,
                                        interactive=True,
                                        info="The maximum numbers of new tokens",
                                    )
                                with column_2:
                                    top_p = gr.Slider(
                                        label="Top-p (nucleus sampling)",
                                        value=0.90,
                                        minimum=0.0,
                                        maximum=1,
                                        step=0.05,
                                        interactive=True,
                                        info="Higher values sample more low-probability tokens",
                                    )
                                    repetition_penalty = gr.Slider(
                                        label="Repetition penalty",
                                        value=1.05,
                                        minimum=1.0,
                                        maximum=2.0,
                                        step=0.05,
                                        interactive=True,
                                        info="Penalize repeated tokens",
                                    )
                # Clickable example prompts; run through the non-streaming
                # wrapper so a single final value is returned.
                gr.Examples(
                    examples=examples,
                    inputs=[instruction],
                    cache_examples=False,
                    fn=process_example,
                    outputs=[output],
                )
    # Stream generate() into the output code box on button click.
    submit.click(
        generate,
        inputs=[instruction, temperature, max_new_tokens, top_p, repetition_penalty],
        outputs=[output],
    )

demo.queue(concurrency_count=16).launch(debug=True)