disi98 committed on
Commit
7e0cf1f
1 Parent(s): acff5e4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +68 -39
app.py CHANGED
@@ -1,70 +1,91 @@
1
  # Import Libraries
2
  import gradio as gr
3
- from transformers import GPT2Tokenizer, GPT2LMHeadModel
4
- import pandas as pd
5
- import numpy as np
6
- import random
7
 
8
 
9
 
10
- # Load pre-trained model and tokenizer
11
- model_name = "gpt2"
12
- tokenizer = GPT2Tokenizer.from_pretrained(model_name)
13
- model = GPT2LMHeadModel.from_pretrained(model_name)
14
 
 
 
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
 
17
  def model_prompt_response(prompt):
18
- score1_judger = random.randint(1,5)
19
- score2_judger = random.randint(1,5)
20
 
21
- # Tokenize the prompt
22
- input_ids = tokenizer.encode(prompt, return_tensors="pt")
 
 
 
 
23
 
24
- # Generate response using beam search
25
- response1_model_out = model.generate(input_ids, max_length=100,
26
- num_return_sequences=4, no_repeat_ngram_size=2, num_beams=5)
27
 
 
 
 
 
 
 
28
 
29
- responses = []
30
- for i, out in enumerate(response1_model_out):
31
- response_out = tokenizer.decode(out, skip_special_tokens=True)
32
- responses.append(response_out)
33
 
34
- response1_model = responses[0]
35
- response2_model = responses[1]
36
- judge_1 = responses[2]
37
- judge_2 = responses[3]
38
 
39
  # return values
40
 
41
- return response1_model, judge_1, score1_judger, response2_model, judge_2, score2_judger
 
 
42
 
43
  # anonators judgement
44
- def judge_response(score1_judger, score2_judger):
45
  score1_anonator = score1_judger
46
  score2_anonator = score2_judger
47
- response1_judger = f'This is a representation for response 1'
48
- response2_judger = f'This is a representation for response 2'
49
 
50
  return response1_judger, score1_anonator, response2_judger, score2_anonator
51
 
52
  demo = gr.Blocks(theme=gr.themes.Monochrome(), title='Group1_Subtask2')
53
 
 
 
 
54
  with demo:
55
 
56
  gr.Markdown(
57
  """
58
  # Group1_Subtask2
59
- ## The Model takes a request from the user and returns a response.
60
- #### A judger is asigned the response from the model and outputs corresponding judgement.
61
  #### The user asses the judgement and alters the responds to correspond to their views or objective truth
62
  """
63
  )
64
  prompt = gr.Textbox(label='Question to the model')
65
  with gr.Row():
66
  response1_model = gr.Textbox(label='Model Response 1')
67
-
68
  judge_1 = gr.Textbox(label='Judger Model Response 1/User Editable', interactive=True)
69
 
70
  score1_judger = gr.Number(value=2, label='Judger Model Score 1/User Editable', interactive=True)
@@ -72,16 +93,21 @@ with demo:
72
  response2_model = gr.Textbox(label='Model Response 2')
73
  judge_2 = gr.Textbox(label='Judger Model Response 2/User Editable', interactive=True)
74
  score2_judger = gr.Number(value=2, label='Judger Model Score 2/User Editable', interactive=True)
75
-
76
 
77
  response_model = gr.Button("Model response")
78
 
79
- #
80
 
81
- response1_judger = gr.Textbox(label='Corrected Response 1')
82
- score1_anonator = gr.Number(label='Corrected score 1')
83
- response2_judger = gr.Textbox(label='Corrected Response 2')
84
- score2_anonator = gr.Number(label='Corrected Score 2')
 
 
 
 
 
 
85
 
86
 
87
  judge_model = gr.Button("Judge-Annonator")
@@ -93,13 +119,16 @@ with demo:
93
  judge_2,
94
  score2_judger,
95
  ])
96
- judge_model.click(judge_response, inputs=[score1_judger, score2_judger] , outputs=[response1_judger, score1_anonator, response2_judger, score2_anonator])
 
 
97
 
98
- demo.launch()
 
99
 
100
  # Team Members
101
  '''
102
  Collins Okoth: collins.okech@digitaldividedata.com
103
  Albert Disi: albert.disi@digitaldividedata.com
104
  Joseph Amadi: joseph.amadi@digitaldividedata.com
105
- '''
 
1
# Import Libraries
import os

import gradio as gr
import google.generativeai as genai


# Configure the Gemini API client.
# SECURITY: the original committed a literal API key here. A key pushed to a
# public repo must be treated as compromised — revoke it, then supply the
# replacement via the GOOGLE_API_KEY environment variable instead.
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY", ""))

# Single Gemini model instance, shared by the responder and the judge calls.
model = genai.GenerativeModel('gemini-pro')
15
+
16
# assign score function
def assign_score(response):
    """Extract the judge's 1-5 score from its free-text verdict.

    Scans *response* for the literal marker ``score=N``. The lowest N wins
    (same precedence as the original 1→5 elif chain); returns 0 when no
    marker is present.
    """
    for candidate in range(1, 6):
        if f'score={candidate}' in response:
            return candidate
    return 0
31
 
32
def model_prompt_response(prompt):
    """Answer *prompt* twice with Gemini, then have the model judge each answer.

    Returns a 6-tuple consumed by the Gradio outputs:
        (response1_model, judge_1, score1_judger,
         response2_model, judge_2, score2_judger)
    where each ``judge_N`` is the judge's free-text verdict and each
    ``scoreN_judger`` is the 1-5 score parsed from it (0 if unparseable).
    """
    # Two independent generations for the same question. Note the leading
    # space in the suffix: the original concatenated it directly onto the
    # prompt, yielding "...questionin 3 sentences or less".
    response1_model = model.generate_content(prompt + " in 3 sentences or less").text
    response2_model = model.generate_content(prompt + " in 3 sentences or less").text

    def _judge_prompt(answer):
        # Shared rubric. The original duplicated this string verbatim and
        # described score 4 with the same wording as score 2 ("slitely
        # relevant"), giving the judge a contradictory scale — fixed here.
        return (
            f'Given the question, {prompt} and answer {answer}, decide whether: '
            'score = 1 when the answer is strongly irrelevant to the question, '
            'score = 2 when the answer is slightly relevant, '
            'score = 3 when the answer is relevant, '
            'score = 4 when it is highly relevant and '
            'score = 5 for a strongly relevant answer. '
            'Give a reason for your answer, start with the score, and write the '
            'score as [score=1], [score=2], [score=3], [score=4], or [score=5].'
        )

    judge_1 = model.generate_content(_judge_prompt(response1_model)).text
    judge_2 = model.generate_content(_judge_prompt(response2_model)).text

    # Parse the numeric scores out of the judge verdicts.
    score1_judger = assign_score(judge_1)
    score2_judger = assign_score(judge_2)

    # return values
    return response1_model, judge_1, score1_judger, response2_model, judge_2, score2_judger
58
+
59
+
60
 
61
# annotator's judgement
def judge_response(score1_judger, score2_judger, judge_1, judge_2):
    """Seed the annotator widgets with the judger's verdicts and scores.

    Pure pass-through: the annotator then edits these copies in the UI.
    Returns (response1_judger, score1_anonator, response2_judger, score2_anonator).
    """
    return judge_1, score1_judger, judge_2, score2_judger
69
 
70
  demo = gr.Blocks(theme=gr.themes.Monochrome(), title='Group1_Subtask2')
71
 
72
+
73
+
74
+
75
  with demo:
76
 
77
  gr.Markdown(
78
  """
79
  # Group1_Subtask2
80
+ ## The Model takes a request from the user and returns a response.
81
+ #### A judger is asigned the response from the model and outputs corresponding judgement.
82
  #### The user asses the judgement and alters the responds to correspond to their views or objective truth
83
  """
84
  )
85
  prompt = gr.Textbox(label='Question to the model')
86
  with gr.Row():
87
  response1_model = gr.Textbox(label='Model Response 1')
88
+
89
  judge_1 = gr.Textbox(label='Judger Model Response 1/User Editable', interactive=True)
90
 
91
  score1_judger = gr.Number(value=2, label='Judger Model Score 1/User Editable', interactive=True)
 
93
  response2_model = gr.Textbox(label='Model Response 2')
94
  judge_2 = gr.Textbox(label='Judger Model Response 2/User Editable', interactive=True)
95
  score2_judger = gr.Number(value=2, label='Judger Model Score 2/User Editable', interactive=True)
96
+
97
 
98
  response_model = gr.Button("Model response")
99
 
 
100
 
101
+
102
+
103
+
104
+ # user/judger edited response
105
+ with gr.Row():
106
+ response1_judger = gr.Textbox(label='Corrected Response 1')
107
+ score1_anonator = gr.Number(label='Corrected score 1')
108
+
109
+ response2_judger = gr.Textbox(label='Corrected Response 2')
110
+ score2_anonator = gr.Number(label='Corrected Score 2')
111
 
112
 
113
  judge_model = gr.Button("Judge-Annonator")
 
119
  judge_2,
120
  score2_judger,
121
  ])
122
+ judge_model.click(judge_response, inputs=[score1_judger, score2_judger, judge_1, judge_2] , outputs=[response1_judger, score1_anonator, response2_judger, score2_anonator])
123
+
124
+
125
 
126
+
127
+ demo.launch(share=True)
128
 
129
  # Team Members
130
  '''
131
  Collins Okoth: collins.okech@digitaldividedata.com
132
  Albert Disi: albert.disi@digitaldividedata.com
133
  Joseph Amadi: joseph.amadi@digitaldividedata.com
134
+ '''