# Import libraries
import os

import gradio as gr
import google.generativeai as genai

# Configure the Gemini API key (read it from the environment, e.g. a GOOGLE_API_KEY
# secret, rather than hard-coding the key in the source)
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
# model_1 = genai.GenerativeModel('gemini-pro')
# model_2 = genai.GenerativeModel('gemini-pro')
model = genai.GenerativeModel('gemini-pro')
# Assign a numeric score based on the score tag found in the judge's response
def assign_score(response):
    if 'score=1' in response:
        score = 1
    elif 'score=2' in response:
        score = 2
    elif 'score=3' in response:
        score = 3
    elif 'score=4' in response:
        score = 4
    elif 'score=5' in response:
        score = 5
    else:
        score = 0  # no recognisable score tag in the response
    return score
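
# A more defensive variant (a sketch only, not wired into the app above): parse the
# bracketed tag the judge prompt asks for, e.g. "[score=3]", with a regular expression
# so stray "score=..." mentions elsewhere in the judge's text are less likely to match.
import re

def assign_score_regex(response):
    match = re.search(r'\[score\s*=\s*([1-5])\]', response)
    return int(match.group(1)) if match else 0
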
def model_prompt_response(prompt):
    # Generate two independent answers from the Gemini model
    prompt_response_1 = model.generate_content(prompt + " in 3 sentences or less")
    prompt_response_2 = model.generate_content(prompt + " in 3 sentences or less")
    response1_model = prompt_response_1.text
    response2_model = prompt_response_2.text
    # Ask the same model to judge each answer's relevance on a 1-5 scale
    prompt_to_judge_1 = f'Given the question, {prompt}, and the answer, {response1_model}, decide on a score: score = 1 when the answer is strongly irrelevant to the question, score = 2 when it is slightly irrelevant, score = 3 when it is relevant, score = 4 when it is mostly relevant, and score = 5 when it is strongly relevant. Give a reason for your answer, state the score first, and write the score as [score=1], [score=2], [score=3], [score=4], or [score=5].'
    prompt_to_judge_2 = f'Given the question, {prompt}, and the answer, {response2_model}, decide on a score: score = 1 when the answer is strongly irrelevant to the question, score = 2 when it is slightly irrelevant, score = 3 when it is relevant, score = 4 when it is mostly relevant, and score = 5 when it is strongly relevant. Give a reason for your answer, state the score first, and write the score as [score=1], [score=2], [score=3], [score=4], or [score=5].'
    judge_1_response_1 = model.generate_content(prompt_to_judge_1)
    judge_2_response_2 = model.generate_content(prompt_to_judge_2)
    judge_1 = judge_1_response_1.text
    judge_2 = judge_2_response_2.text
    score1_judger = assign_score(judge_1)
    score2_judger = assign_score(judge_2)
    # Return the answers, the judge's reasoning, and the parsed scores
    return response1_model, judge_1, score1_judger, response2_model, judge_2, score2_judger
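
# Hypothetical refactor (sketch, not used by the app above): build the judging prompt
# with a small helper instead of duplicating the same f-string for each answer.
def build_judge_prompt(question, answer):
    return (
        f'Given the question, {question}, and the answer, {answer}, decide on a score: '
        'score = 1 when the answer is strongly irrelevant to the question, '
        'score = 2 when it is slightly irrelevant, score = 3 when it is relevant, '
        'score = 4 when it is mostly relevant, and score = 5 when it is strongly relevant. '
        'Give a reason for your answer, state the score first, and write the score as '
        '[score=1], [score=2], [score=3], [score=4], or [score=5].'
    )
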
# Annotator judgement: pass the (possibly user-edited) judgements and scores through
def judge_response(score1_judger, score2_judger, judge_1, judge_2):
    score1_anonator = score1_judger
    score2_anonator = score2_judger
    response1_judger = judge_1
    response2_judger = judge_2
    return response1_judger, score1_anonator, response2_judger, score2_anonator

demo = gr.Blocks(theme=gr.themes.Monochrome(), title='Group1_Subtask2')

with demo:
    gr.Markdown(
        """
        # Group1_Subtask2
        ## The model takes a request from the user and returns a response.
        #### A judger is assigned the model's response and outputs a corresponding judgement.
        #### The user assesses the judgement and edits the response to match their views or the objective truth.
        """
    )
    prompt = gr.Textbox(label='Question to the model')
    with gr.Row():
        response1_model = gr.Textbox(label='Model Response 1')
        judge_1 = gr.Textbox(label='Judger Model Response 1 / User Editable', interactive=True)
        score1_judger = gr.Number(value=2, label='Judger Model Score 1 / User Editable', interactive=True)
    with gr.Row():
        response2_model = gr.Textbox(label='Model Response 2')
        judge_2 = gr.Textbox(label='Judger Model Response 2 / User Editable', interactive=True)
        score2_judger = gr.Number(value=2, label='Judger Model Score 2 / User Editable', interactive=True)
    response_model = gr.Button("Model response")
    # User/judger-edited responses and scores
    with gr.Row():
        response1_judger = gr.Textbox(label='Corrected Response 1')
        score1_anonator = gr.Number(label='Corrected Score 1')
        response2_judger = gr.Textbox(label='Corrected Response 2')
        score2_anonator = gr.Number(label='Corrected Score 2')
    judge_model = gr.Button("Judge-Annotator")
    # Wire the buttons to the two callbacks defined above
    response_model.click(
        model_prompt_response,
        inputs=prompt,
        outputs=[response1_model, judge_1, score1_judger, response2_model, judge_2, score2_judger],
    )
    judge_model.click(
        judge_response,
        inputs=[score1_judger, score2_judger, judge_1, judge_2],
        outputs=[response1_judger, score1_anonator, response2_judger, score2_anonator],
    )
demo.launch(share=True)  # share=True additionally creates a temporary public link; omit it to serve locally only
# Team Members
'''
Collins Okoth: collins.okech@digitaldividedata.com
Albert Disi: albert.disi@digitaldividedata.com
Joseph Amadi: joseph.amadi@digitaldividedata.com
'''