# Hugging Face Spaces status captured with the source: "Spaces: Runtime error"
# (scrape artifact, not part of the program).
# Import libraries
import os

import gradio as gr
import google.generativeai as genai

# Configure the Gemini API.
# SECURITY: an API key was previously hard-coded on this line. A key committed
# to source control must be treated as leaked and revoked; read it from the
# environment instead (set GOOGLE_API_KEY in the Space/deployment secrets).
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

# One model instance serves both answer generation and judging.
model = genai.GenerativeModel('gemini-pro')
# assign score function | |
def assign_score(response):
    """Extract a 1-5 relevance score from a judge's response text.

    Scans *response* for the first substring ``score=N`` with N checked in
    ascending order from 1 to 5 and returns that N. Returns 0 when no such
    marker is found.
    """
    for candidate in range(1, 6):
        if f'score={candidate}' in response:
            return candidate
    # No recognizable score marker in the judge's text.
    return 0
# Rubric handed to the judging model. The original inline strings described
# both score=2 and score=4 as "slitely relevant" (sic); the rubric below fixes
# the typo and gives score=4 its own meaning so the five levels are distinct.
_JUDGE_PROMPT_TEMPLATE = (
    'Given the question, {question} and answer {answer}, decide whether: '
    'score = 1 when the answer is strongly irrelevant to the question, '
    'score = 2 when the answer is slightly relevant, '
    'score = 3 when the answer is relevant, '
    'score = 4 when it is very relevant and '
    'score = 5 for a strongly relevant answer. '
    'Give a reason for your answer, start with the score, and write the '
    'score as [score=1], [score=2], [score=3], [score=4], or [score=5].'
)


def _judge(question, answer):
    """Have the model grade *answer* against *question*.

    Returns ``(judgement_text, score)`` where score is 1-5, or 0 when the
    judgement contains no recognizable marker (see assign_score).
    """
    judgement = model.generate_content(
        _JUDGE_PROMPT_TEMPLATE.format(question=question, answer=answer)
    ).text
    return judgement, assign_score(judgement)


def model_prompt_response(prompt):
    """Generate two short answers to *prompt* and have the model judge each.

    Returns a 6-tuple in the order the UI expects:
        (answer_1, judgement_1, score_1, answer_2, judgement_2, score_2)
    """
    # Two independent generations of the same question; the API is
    # non-deterministic so the answers usually differ. Note the leading
    # space before the length hint (the original concatenated it with no
    # separator, producing e.g. "...weather?in 3 sentences or less").
    answer_1 = model.generate_content(prompt + " in 3 sentences or less").text
    answer_2 = model.generate_content(prompt + " in 3 sentences or less").text

    judgement_1, score_1 = _judge(prompt, answer_1)
    judgement_2, score_2 = _judge(prompt, answer_2)

    return answer_1, judgement_1, score_1, answer_2, judgement_2, score_2
# anonators judgement | |
def judge_response(score1_judger, score2_judger, judge_1, judge_2):
    """Copy the (possibly user-edited) judge outputs into the annotator fields.

    The values arrive already corrected by the user in the UI; this callback
    simply forwards them in the order the "Corrected" widgets expect:
    (response1_judger, score1_anonator, response2_judger, score2_anonator).
    """
    return judge_1, score1_judger, judge_2, score2_judger
# Build the Gradio UI: the model answers a question twice, a judge model
# scores each answer, and the user may edit the judgements/scores before
# forwarding them to the "Corrected" (annotator) fields.
with gr.Blocks(theme=gr.themes.Monochrome(), title='Group1_Subtask2') as demo:
    gr.Markdown(
        """
        # Group1_Subtask2
        ## The model takes a request from the user and returns a response.
        #### A judger is assigned the response from the model and outputs a corresponding judgement.
        #### The user assesses the judgement and alters the response to correspond to their views or objective truth.
        """
    )
    prompt = gr.Textbox(label='Question to the model')
    with gr.Row():
        response1_model = gr.Textbox(label='Model Response 1')
        judge_1 = gr.Textbox(label='Judger Model Response 1/User Editable', interactive=True)
        score1_judger = gr.Number(value=2, label='Judger Model Score 1/User Editable', interactive=True)
    with gr.Row():
        response2_model = gr.Textbox(label='Model Response 2')
        judge_2 = gr.Textbox(label='Judger Model Response 2/User Editable', interactive=True)
        score2_judger = gr.Number(value=2, label='Judger Model Score 2/User Editable', interactive=True)
    response_model = gr.Button("Model response")

    # Annotator-corrected (user-edited) judgements and scores.
    with gr.Row():
        response1_judger = gr.Textbox(label='Corrected Response 1')
        score1_anonator = gr.Number(label='Corrected score 1')
        response2_judger = gr.Textbox(label='Corrected Response 2')
        score2_anonator = gr.Number(label='Corrected Score 2')
    judge_model = gr.Button("Judge-Annotator")

    # Wire the buttons to the backend callbacks.
    response_model.click(
        model_prompt_response,
        inputs=prompt,
        outputs=[response1_model, judge_1, score1_judger,
                 response2_model, judge_2, score2_judger],
    )
    judge_model.click(
        judge_response,
        inputs=[score1_judger, score2_judger, judge_1, judge_2],
        outputs=[response1_judger, score1_anonator, response2_judger, score2_anonator],
    )

demo.launch(share=True)
# Team Members
'''
Collins Okoth: collins.okech@digitaldividedata.com
Albert Disi: albert.disi@digitaldividedata.com
Joseph Amadi: joseph.amadi@digitaldividedata.com
'''