File size: 8,185 Bytes
88a5ed3
 
 
 
 
 
 
 
 
86514f4
88a5ed3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86514f4
 
88a5ed3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86514f4
88a5ed3
 
 
 
 
 
 
 
 
 
86514f4
88a5ed3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86514f4
 
 
 
 
 
 
88a5ed3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86514f4
88a5ed3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
import json
import os
import shutil

import gradio as gr
from huggingface_hub import Repository

import openai

# Hugging Face token used to push collected prompts to a dataset repo;
# when unset, data collection is skipped entirely (see the HF_TOKEN guards below).
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# NOTE(review): API_URL is read but never used anywhere in this file —
# confirm it is needed before removing.
API_URL = os.environ.get("API_URL")

# Visual theme for the Gradio UI: monochrome base with indigo/blue accents,
# slate neutrals, small corner radii, and "Open Sans" with generic fallbacks.
theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_sm,
    font=[
        gr.themes.GoogleFont("Open Sans"),
        "ui-sans-serif",
        "system-ui",
        "sans-serif",
    ],
)

if HF_TOKEN:
    # Start from a clean checkout: drop any stale local clone.
    # ignore_errors=True makes this a no-op when ./data/ does not exist.
    # This replaces a bare `except: pass`, which also swallowed
    # KeyboardInterrupt/SystemExit and hid real failures.
    shutil.rmtree("./data/", ignore_errors=True)

    # Clone the dataset repo that stores submitted prompts/outputs, and pull
    # so the local copy is current before save_inputs_and_outputs appends.
    repo = Repository(
        local_dir="./data/",
        clone_from="Ligeng-Zhu/gpt-eval-prompts",
        token=HF_TOKEN,
        repo_type="dataset",
    )
    repo.git_pull()


# NOTE(review): this template is defined but never referenced in this file —
# confirm it is used elsewhere before deleting.
PROMPT_TEMPLATE = """Question: {prompt}\n\nAnswer:"""

def save_inputs_and_outputs(inputs, outputs, generate_kwargs):
    """Append one evaluation record to data/prompts.jsonl and push to the Hub.

    Args:
        inputs: Full prompt text sent to the model.
        outputs: Text generated by the model.
        generate_kwargs: Generation parameters used (stored for reproducibility).

    Requires the module-level ``repo`` clone created at startup, which only
    exists when HF_TOKEN is set — callers must guard on that (gpt_eval does).
    """
    # One JSON object per line (JSONL). Explicit utf-8 plus ensure_ascii=False
    # keeps non-ASCII prompt text stored verbatim and readable.
    with open(os.path.join("data", "prompts.jsonl"), "a", encoding="utf-8") as f:
        json.dump(
            {"inputs": inputs, "outputs": outputs, "generate_kwargs": generate_kwargs},
            f,
            ensure_ascii=False,
        )
        f.write("\n")
    # Push after the file is closed so the appended line is flushed to disk
    # before git picks it up (previously pushed inside the `with` block).
    # The returned commit URL was unused, so it is no longer bound.
    repo.push_to_hub()
# Default system prompt offered as an example in the UI.
example_system_prompt = [
    "You are a helpful and precise assistant for checking the quality of the answer."
]
# Default evaluation prompt; contains {question}/{answer} placeholders filled
# in by gpt_eval via str.format.
example_your_prompt = [
    "[Question]\n{question}\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of of Assistant's Answer]\n\nWe would like to request your feedback on the performance of the AI assistant in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only the value indicating the scores for the Assistant. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."
]
# Rows for gr.Examples: pair the two prompts above instead of duplicating the
# literal strings (the previous copy-paste duplication risked silent drift).
examples = [[example_system_prompt[0], example_your_prompt[0]]]

def gpt_eval(system_prompt, prompt, question, answer, openai_key, do_save=True, model="gpt-3.5-turbo"):
    """Stream a GPT evaluation of an LLM's answer.

    Generator: first yields the fully formatted request (so the user can see
    exactly what was sent), then repeatedly yields the request plus the
    growing model output as streamed chunks arrive.

    Args:
        system_prompt: System message for the chat completion.
        prompt: Template containing {question} and {answer} placeholders.
        question: The question posed to the LLM under evaluation.
        answer: The LLM's answer to be graded.
        openai_key: User-supplied OpenAI API key.
        do_save: Persist the exchange to the HF dataset (only when HF_TOKEN is set).
        model: Chat model name. New optional parameter; the default preserves
            the previously hard-coded "gpt-3.5-turbo".
    """
    # Cheap local sanity check; real key validation happens at OpenAI.
    if openai_key is None or len(openai_key) <= 10:
        yield "Please enter a valid openai API key"
        return
    origin_input = prompt.format(question=question, answer=answer)
    input_str = system_prompt + "\n" + origin_input + "\n\n---\n"
    # Show the composed request immediately, before any tokens arrive.
    yield input_str
    # NOTE(review): setting the key on the module is process-global — two
    # concurrent users could clobber each other's keys; confirm acceptable.
    openai.api_key = openai_key
    res = openai.ChatCompletion.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": origin_input},
        ],
        stream=True,
    )
    output = ""
    for chunk in res:
        # Streamed chunks carry incremental text under choices[0].delta.content.
        content = chunk["choices"][0].get("delta", {}).get("content")
        if content is not None:
            output += content
            yield input_str + output
    if do_save and HF_TOKEN:
        save_inputs_and_outputs(
            inputs=system_prompt + "\n" + origin_input,
            outputs=output,
            generate_kwargs={},
        )

css = ".generating {visibility: hidden}"  # + share_btn_css


system_prompt = gr.Textbox(
    value = "You are a helpful and precise assistant for checking the quality of the answer.",
    interactive=True,
    label="System Prompt",
)
your_prompt = gr.Textbox(
    placeholder="Enter your prompt here",
    value="[Question]\n{question}\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of of Assistant's Answer]\n\nWe would like to request your feedback on the performance of the AI assistant in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only the value indicating the scores for the Assistant. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment.",
    label="Your Prompt",
    interactive=True,
)
llm_question = gr.Textbox(
    placeholder="Enter your question here",
    value="What is the meaning of life?",
    label="Your Question",
    elem_id="q-input",
)
llm_answer = gr.Textbox(
    placeholder="Enter your answer here",
    label="Your LLM's Answer",
    value="C'est la vie!",
    elem_id="q-tmp-output",
)



# Application layout: left column holds the prompt inputs and streamed
# evaluation; right column holds API key / model settings.
with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
    with gr.Column():
        gr.Markdown(
            """
            Type in the box below and click the button to generate answers to your most pressing questions!            
            ⚠️ **Data Collection**: by default, we are collecting the prompts entered in this app to further improve and evaluate the model. Do not share any personal or sensitive information while using the app! You can opt out of this data collection by removing the checkbox below:
        """
        )

        with gr.Row():
            with gr.Column(scale=3):
                # Opt-out switch for prompt/output logging.
                # NOTE(review): this checkbox is not passed to submit.click
                # below, so unchecking it currently has no effect — confirm
                # whether it should be wired into gpt_eval's do_save.
                do_save = gr.Checkbox(
                    value=True,
                    label="Store data",
                    info="You agree to the storage of your prompt and generated text for research and development purposes:",
                )
                # Pre-built components declared above; rendered here so they
                # appear inside this column.
                system_prompt.render()
                your_prompt.render()
                llm_question.render()
                llm_answer.render()

                with gr.Box():
                    gr.Markdown("**Evaluation by GPT**")
                    # Streaming output panel updated by gpt_eval.
                    evaluations = gr.Markdown(elem_id="q-output")
                submit = gr.Button("Generate", variant="primary")

            with gr.Column(scale=1):
                # Held in memory for the request only; never rendered back.
                openai_key = gr.Textbox(
                    placeholder="This will not be saved or shared.",
                    label="OpenAI API",
                    type="password",
                )

                # Display-only for now: not passed to submit.click, so
                # gpt_eval's default model is always used.
                # (Fixed user-facing typo: "opions" -> "options".)
                openai_model = gr.Textbox(
                    value="gpt-3.5-turbo",
                    label="Model (More options coming soon)",
                )

        # Example rows that fill the system + evaluation prompt boxes.
        example_box = gr.Examples(
            examples=examples,
            inputs=[system_prompt, your_prompt],
            cache_examples=False,
        )

    # Stream gpt_eval's generator output into the Markdown panel.
    submit.click(
        gpt_eval,
        inputs=[system_prompt, your_prompt, llm_question, llm_answer, openai_key],
        outputs=[evaluations],
    )

# queue() enables generator (streaming) event handlers; debug=True keeps the
# process attached and prints server errors to the console.
demo.queue().launch(debug=True)