disi98 committed on
Commit
b1742e6
1 Parent(s): 5cfdf47

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -10
app.py CHANGED
@@ -8,8 +8,9 @@ import google.generativeai as genai
8
  # api
9
  genai.configure(api_key="AIzaSyCmdsQeYMapI6l3maUpAkrE_GkT348w5UA")
10
 
 
 
11
 
12
- # gemini model initialization
13
  model = genai.GenerativeModel('gemini-pro')
14
 
15
  # assign score function
@@ -28,11 +29,9 @@ def assign_score(response):
28
  score = 0
29
  return score
30
 
31
- # funtion: prompt to model, parse the response to the judger, and enable user editing judgment
32
  def model_prompt_response(prompt):
33
- # using the Gemini model
34
-
35
- # pompting the model for response
36
  prompt_response_1 = model.generate_content(prompt + "in 3 sentences or less")
37
  prompt_response_2 = model.generate_content(prompt + "in 3 sentences or less")
38
  response1_model = prompt_response_1.text
@@ -43,14 +42,12 @@ def model_prompt_response(prompt):
43
 
44
  prompt_to_judge_2 = f'Given the question, {prompt} and answer {response2_model}, decide whether: score = 1 when answer is strongly irrelevant to the question, score = 2 when the answer is slightly irrelevant, score = 3 when the answer is slightly relevant, score = 4 when it is relevant and score = 5 for strongly relevant answer. Give reason for your answer and start with scores and write the score as [score=1], [score=2], [score=3], [score=4], or [score=5]. '
45
 
46
- # prompting the judger for judgment
47
  judge_1_response_1 = model.generate_content(prompt_to_judge_1)
48
  judge_2_response_2 = model.generate_content(prompt_to_judge_2)
49
  judge_1 = judge_1_response_1.text
50
  judge_2 = judge_2_response_2.text
51
 
52
 
53
- # assigning scores
54
  score1_judger = assign_score(judge_1)
55
  score2_judger = assign_score(judge_2)
56
 
@@ -59,9 +56,10 @@ def model_prompt_response(prompt):
59
 
60
  return response1_model, judge_1, score1_judger, response2_model, judge_2, score2_judger
61
 
 
 
62
  # annotators' judgment
63
  def judge_response(score1_judger, score2_judger, judge_1, judge_2):
64
- # user judgemnet
65
  score1_anonator = score1_judger
66
  score2_anonator = score2_judger
67
  response1_judger = judge_1
@@ -71,14 +69,17 @@ def judge_response(score1_judger, score2_judger, judge_1, judge_2):
71
 
72
  demo = gr.Blocks(theme=gr.themes.Monochrome(), title='Group1_Subtask2')
73
 
 
 
 
74
  with demo:
75
 
76
  gr.Markdown(
77
  """
78
  # Group1_Subtask2
79
  ## The Model takes a request from the user and returns a response.
80
- #### A judger is assigned the response from the model and outputs the corresponding judgment.
81
- #### The user asses the judgment and alters the response to correspond to their views or objective truth
82
  """
83
  )
84
  prompt = gr.Textbox(label='Question to the model')
@@ -97,6 +98,9 @@ with demo:
97
  response_model = gr.Button("Model response")
98
 
99
 
 
 
 
100
  # user/judger edited response
101
  with gr.Row():
102
  response1_judger = gr.Textbox(label='Corrected Response 1')
 
8
  # api
9
  genai.configure(api_key="AIzaSyCmdsQeYMapI6l3maUpAkrE_GkT348w5UA")
10
 
11
+ # model_1 = genai.GenerativeModel('gemini-pro')
12
+ # model_2 = genai.GenerativeModel('gemini-pro')
13
 
 
14
  model = genai.GenerativeModel('gemini-pro')
15
 
16
  # assign score function
 
29
  score = 0
30
  return score
31
 
 
32
  def model_prompt_response(prompt):
33
+ # using the gemini model
34
+
 
35
  prompt_response_1 = model.generate_content(prompt + "in 3 sentences or less")
36
  prompt_response_2 = model.generate_content(prompt + "in 3 sentences or less")
37
  response1_model = prompt_response_1.text
 
42
 
43
  prompt_to_judge_2 = f'Given the question, {prompt} and answer {response2_model}, decide whether: score = 1 when answer is strongly irrelevant to the question, score = 2 when the answer is slightly irrelevant, score = 3 when the answer is slightly relevant, score = 4 when it is relevant and score = 5 for strongly relevant answer. Give reason for your answer and start with scores and write the score as [score=1], [score=2], [score=3], [score=4], or [score=5]. '
44
 
 
45
  judge_1_response_1 = model.generate_content(prompt_to_judge_1)
46
  judge_2_response_2 = model.generate_content(prompt_to_judge_2)
47
  judge_1 = judge_1_response_1.text
48
  judge_2 = judge_2_response_2.text
49
 
50
 
 
51
  score1_judger = assign_score(judge_1)
52
  score2_judger = assign_score(judge_2)
53
 
 
56
 
57
  return response1_model, judge_1, score1_judger, response2_model, judge_2, score2_judger
58
 
59
+
60
+
61
  # annotators' judgment
62
  def judge_response(score1_judger, score2_judger, judge_1, judge_2):
 
63
  score1_anonator = score1_judger
64
  score2_anonator = score2_judger
65
  response1_judger = judge_1
 
69
 
70
  demo = gr.Blocks(theme=gr.themes.Monochrome(), title='Group1_Subtask2')
71
 
72
+
73
+
74
+
75
  with demo:
76
 
77
  gr.Markdown(
78
  """
79
  # Group1_Subtask2
80
  ## The Model takes a request from the user and returns a response.
81
+ #### A judger is assigned the response from the model and outputs the corresponding judgment.
82
+ #### The user assesses the judgment and alters the response to correspond to their views or objective truth
83
  """
84
  )
85
  prompt = gr.Textbox(label='Question to the model')
 
98
  response_model = gr.Button("Model response")
99
 
100
 
101
+
102
+
103
+
104
  # user/judger edited response
105
  with gr.Row():
106
  response1_judger = gr.Textbox(label='Corrected Response 1')