import json
import os
import shutil

import gradio as gr
import openai
from huggingface_hub import Repository

# HF token enables logging prompts/outputs to a Hugging Face dataset repo.
HF_TOKEN = os.environ.get("HF_TOKEN", None)
API_URL = os.environ.get("API_URL")  # NOTE(review): currently unused — confirm before removing

theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_sm,
    font=[
        gr.themes.GoogleFont("Open Sans"),
        "ui-sans-serif",
        "system-ui",
        "sans-serif",
    ],
)

if HF_TOKEN:
    # Start from a fresh clone of the prompt-logging dataset.
    # ignore_errors=True replaces the previous bare `except: pass`, which
    # silently swallowed every exception type.
    shutil.rmtree("./data/", ignore_errors=True)
    repo = Repository(
        local_dir="./data/",
        clone_from="Ligeng-Zhu/gpt-eval-prompts",
        token=HF_TOKEN,
        repo_type="dataset",
    )
    repo.git_pull()

PROMPT_TEMPLATE = """Question: {prompt}\n\nAnswer:"""


def save_inputs_and_outputs(inputs, outputs, generate_kwargs):
    """Append one evaluation record to data/prompts.jsonl and push it upstream.

    Requires the module-level `repo` created above when HF_TOKEN is set.

    Args:
        inputs: The full prompt text sent to the model.
        outputs: The model's generated evaluation.
        generate_kwargs: Generation settings to record alongside the exchange.
    """
    with open(os.path.join("data", "prompts.jsonl"), "a") as f:
        json.dump(
            {"inputs": inputs, "outputs": outputs, "generate_kwargs": generate_kwargs},
            f,
            ensure_ascii=False,
        )
        f.write("\n")
    # Push AFTER the file is closed so the buffered line is flushed to disk
    # before the repo snapshot is taken (previously pushed inside the `with`).
    commit_url = repo.push_to_hub()


# Default evaluation prompts, defined once and reused below (previously the
# same ~700-char literal was duplicated). Also fixes the "The End of of"
# typo in the original template.
EVAL_SYSTEM_PROMPT = (
    "You are a helpful and precise assistant for checking the quality of the answer."
)
EVAL_USER_PROMPT = (
    "[Question]\n{question}\n\n[The Start of Assistant's Answer]\n{answer}\n"
    "[The End of Assistant's Answer]\n\n"
    "We would like to request your feedback on the performance of the AI assistant "
    "in response to the user question displayed above.\n"
    "Please rate the helpfulness, relevance, accuracy, level of details of their "
    "responses. Each assistant receives an overall score on a scale of 1 to 10, "
    "where a higher score indicates better overall performance.\n"
    "Please first output a single line containing only the value indicating the "
    "scores for the Assistant. In the subsequent line, please provide a "
    "comprehensive explanation of your evaluation, avoiding any potential bias "
    "and ensuring that the order in which the responses were presented does not "
    "affect your judgment."
)

example_system_prompt = [EVAL_SYSTEM_PROMPT]
example_your_prompt = [EVAL_USER_PROMPT]
examples = [[EVAL_SYSTEM_PROMPT, EVAL_USER_PROMPT]]


def gpt_eval(system_prompt, prompt, question, answer, openai_key, do_save=True,
             model="gpt-3.5-turbo"):
    """Stream a GPT evaluation of `answer` to `question`.

    Yields the formatted input first, then the input plus the growing model
    output as streamed chunks arrive. Optionally logs the exchange to the
    HF dataset when HF_TOKEN is configured.

    Args:
        system_prompt: System message for the chat completion.
        prompt: User-prompt template containing {question} and {answer}.
        question: The question posed to the LLM under evaluation.
        answer: That LLM's answer being graded.
        openai_key: Caller-supplied OpenAI API key.
        do_save: Persist the prompt/output pair when True (and HF_TOKEN set).
        model: OpenAI chat model name (new parameter; defaults to the value
            that was previously hard-coded, so existing callers are unaffected).
    """
    # Cheap sanity check before any network call; also rejects empty strings.
    if not openai_key or len(openai_key) <= 10:
        yield "Please enter a valid openai API key"
        return

    origin_input = prompt.format(question=question, answer=answer)
    input_str = system_prompt + "\n" + origin_input + "\n\n---\n"
    yield input_str

    # NOTE(review): uses the pre-1.0 openai SDK interface — pin openai<1.0.
    openai.api_key = openai_key
    res = openai.ChatCompletion.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": origin_input},
        ],
        stream=True,
    )

    output = ""
    for chunk in res:
        content = chunk["choices"][0].get("delta", {}).get("content")
        if content is not None:
            output += content
            yield input_str + output

    if do_save and HF_TOKEN:
        save_inputs_and_outputs(
            inputs=system_prompt + "\n" + origin_input,
            outputs=output,
            generate_kwargs={"model": model},  # record which model produced it
        )
    return


css = ".generating {visibility: hidden}"  # + share_btn_css

system_prompt = gr.Textbox(
    value=EVAL_SYSTEM_PROMPT,
    interactive=True,
    label="System Prompt",
)
your_prompt = gr.Textbox(
    placeholder="Enter your prompt here",
    # Default evaluation template ({question}/{answer} placeholders are filled
    # by gpt_eval). Fixes the "The End of of" typo from the original.
    value="[Question]\n{question}\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of Assistant's Answer]\n\nWe would like to request your feedback on the performance of the AI assistant in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only the value indicating the scores for the Assistant. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment.",
    label="Your Prompt",
    interactive=True,
)
llm_question = gr.Textbox(
    placeholder="Enter your question here",
    value="What is the meaning of life?",
    label="Your Question",
    elem_id="q-input",
)
llm_answer = gr.Textbox(
    placeholder="Enter your answer here",
    label="Your LLM's Answer",
    value="C'est la vie!",
    elem_id="q-tmp-output",
)

with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
    with gr.Column():
        gr.Markdown(
            """
            Type in the box below and click the button to generate answers to your most pressing questions!

            ⚠️ **Data Collection**: by default, we are collecting the prompts entered in this app to further improve and evaluate the model. Do not share any personal or sensitive information while using the app!

            You can opt out of this data collection by unchecking the box below:
            """
        )
        with gr.Row():
            with gr.Column(scale=3):
                do_save = gr.Checkbox(
                    value=True,
                    label="Store data",
                    info="You agree to the storage of your prompt and generated text for research and development purposes:",
                )
                system_prompt.render()
                your_prompt.render()
                llm_question.render()
                llm_answer.render()
                with gr.Box():
                    gr.Markdown("**Evaluation by GPT**")
                    evaluations = gr.Markdown(elem_id="q-output")
                submit = gr.Button("Generate", variant="primary")
            with gr.Column(scale=1):
                openai_key = gr.Textbox(
                    placeholder="This will not be saved or shared.",
                    label="OpenAI API",
                    type="password",
                )
                openai_model = gr.Textbox(
                    value="gpt-3.5-turbo",
                    label="Model (More options coming soon)",
                )
        example_box = gr.Examples(
            examples=examples,
            inputs=[system_prompt, your_prompt],
            cache_examples=False,
        )
        # Bug fix: `do_save` is now wired through so the "Store data" checkbox
        # actually controls logging (it was previously rendered but never
        # passed, so data was saved regardless of the user's choice).
        submit.click(
            gpt_eval,
            inputs=[system_prompt, your_prompt, llm_question, llm_answer, openai_key, do_save],
            outputs=[evaluations],
        )

demo.queue().launch(debug=True)