Spaces:
Sleeping
Sleeping
Naisong Zhou
committed on
Commit
•
9863223
1
Parent(s):
11fc848
revise to add google slides api
Browse files- .gitignore +2 -0
- app.py +91 -31
- configs.py +0 -1
- requirements.txt +2 -1
- save_data.py +74 -0
- utils.py +25 -18
.gitignore
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
.env
|
2 |
+
*.json
|
app.py
CHANGED
@@ -1,38 +1,98 @@
|
|
1 |
import gradio as gr
|
2 |
from utils import *
|
3 |
-
import
|
4 |
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
|
9 |
-
def
|
10 |
-
|
11 |
-
|
12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
else:
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
greet_btn.click(fn=get_answer,
|
27 |
-
inputs=[task, human_input, cooperate_style],
|
28 |
-
outputs=output,
|
29 |
-
api_name="answer")
|
30 |
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
38 |
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
from utils import *
|
3 |
+
from save_data import add_new_data, get_sheet_service
|
4 |
|
5 |
+
class SessionManager:
    """In-memory log of human/AI cooperation rounds.

    Each session is a dict with six fields: the task, the human text, the
    cooperation style, and the (initially None) AI output, merged output,
    and evaluation. A session's id is its position in the list.
    """

    def __init__(self):
        # Sessions are kept in creation order; the list index is the id.
        self.sessions = []

    def add_session(self, task, human_input, cooperate_style):
        """Record a new session and return its index."""
        record = dict(
            task=task,
            human_input=human_input,
            cooperate_style=cooperate_style,
            ai_output=None,
            merged_output=None,
            evaluation=None,
        )
        self.sessions.append(record)
        return len(self.sessions) - 1

    def update_output(self, index, output, output_type='merged_output'):
        """Store *output* under the *output_type* field of session *index*."""
        self.sessions[index][output_type] = output

    def get_session(self, index):
        """Return the session dict stored at *index*."""
        return self.sessions[index]

    def save_session_to_sheet(self, index, service, SHEET_ID):
        """Append session *index* as one row of the Google Sheet."""
        row = list(self.sessions[index].values())
        # The sheet mirrors the six keys of a session dict, in order.
        add_new_data(row, service, SHEET_ID, num_of_columns=6)
|
31 |
+
|
32 |
+
|
33 |
+
def handle_interaction(task, human_input, cooperate_style, session_manager, api_key):
    """Run one cooperation round and record its outputs.

    Returns (ai_output, merged_output, session_index).
    """
    idx = session_manager.add_session(task, human_input, cooperate_style)
    if cooperate_style == "sequential":
        # Sequential: the model builds on the human text, so the AI text
        # and the merged text are one and the same.
        ai_output = merge_texts_sequential(task, human_input, api_key)
        output = ai_output
        session_manager.update_output(idx, ai_output, 'ai_output')
    else:
        # Parallel: the model answers independently, then the two answers
        # are fused.
        ai_output = generate_text_with_gpt(task, api_key)
        session_manager.update_output(idx, ai_output, 'ai_output')
        output = merge_texts_parallel(task, human_input, ai_output, api_key)
    session_manager.update_output(idx, output, 'merged_output')
    return ai_output, output, idx
|
45 |
+
|
46 |
+
def evaluate_interaction(session_index, session_manager, api_key):
    """Score a session's merged output with GPT and cache the result."""
    record = session_manager.get_session(session_index)
    score = get_evaluation_with_gpt(record['task'], record['merged_output'], api_key)
    record['evaluation'] = score
    return score
|
|
|
|
|
|
|
|
|
|
|
51 |
|
52 |
+
def save_data(session_index, session_manager, service, SHEET_ID):
    """Persist one session to the configured Google Sheet and confirm."""
    session_manager.save_session_to_sheet(session_index, service, SHEET_ID)
    return "Data has been saved to Google Sheets."
|
55 |
+
|
56 |
+
if __name__ == "__main__":
    # Deployed (Spaces) configuration: secrets come from the environment,
    # not from a local .env file.
    api_key = get_api_key(local=False)
    service, SHEET_ID = get_sheet_service(local=False)
    session_manager = SessionManager()

    with gr.Blocks() as app:
        with gr.Row():
            task = gr.Textbox(label="Task Description")
            human_input = gr.Textbox(label="Human Input")
        with gr.Row():
            cooperate_style = gr.Radio(choices=['sequential', 'parallel'], label="Cooperation Style")
            submit_btn = gr.Button("Create")
        with gr.Row():
            ai_output = gr.Textbox(label="AI Output (if it is sequential)")
            merged_output = gr.Textbox(label="Merged Output given with cooperation")
            # precision=0 keeps the round-tripped value integral; gr.Number
            # otherwise hands callbacks a float, which breaks list indexing
            # inside SessionManager.
            session_index = gr.Number(label="Session Index", visible=False, precision=0)

        submit_btn.click(
            fn=lambda task, human_input, cooperate_style: handle_interaction(task, human_input, cooperate_style, session_manager, api_key),
            inputs=[task, human_input, cooperate_style],
            outputs=[ai_output, merged_output, session_index]
        )

        evaluate_btn = gr.Button("Evaluate")
        evaluation_result = gr.Textbox(label="Evaluation Result")

        # int() guards against a float index coming back from the UI.
        evaluate_btn.click(
            fn=lambda session_index: evaluate_interaction(int(session_index), session_manager, api_key),
            inputs=[session_index],
            outputs=[evaluation_result]
        )

        save_btn = gr.Button("Save Data")
        save_result = gr.Label()

        save_btn.click(
            fn=lambda session_index: save_data(int(session_index), session_manager, service, SHEET_ID),
            inputs=[session_index],
            outputs=[save_result]
        )

        app.launch(share=True)
|
98 |
+
|
configs.py
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
OPEN_API_KEY = "sk-[REDACTED]"  # secret key leaked in commit history — redacted; the original key must be revoked
|
|
|
|
requirements.txt
CHANGED
@@ -1,2 +1,3 @@
|
|
1 |
openai
|
2 |
-
gradio
|
|
|
|
1 |
openai
|
2 |
+
gradio
|
3 |
+
python-dotenv
|
save_data.py
ADDED
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from google.oauth2 import service_account
|
3 |
+
from googleapiclient.discovery import build
|
4 |
+
import json
|
5 |
+
|
6 |
+
|
7 |
+
import os
|
8 |
+
|
9 |
+
def load_envs(local=False):
    """Read the service-account JSON and target sheet id from the environment.

    When *local* is true, a .env file is loaded into the environment first.
    Returns (service_account_info, SHEET_ID). Raises KeyError if either
    variable is absent.
    """
    if local:
        from dotenv import load_dotenv
        load_dotenv()
    creds = json.loads(os.environ['GOOGLE_APPLICATION_CREDENTIALS_JSON'])
    sheet_id = os.environ['SHEET_ID']
    return creds, sheet_id
|
17 |
+
|
18 |
+
|
19 |
+
def get_sheet_service(local=False):
    """Build an authenticated Google Sheets API client.

    Returns (service, SHEET_ID) so callers can issue spreadsheet calls
    against the configured sheet.
    """
    info, sheet_id = load_envs(local=local)
    # Scope limits the credentials to spreadsheet access only.
    creds = service_account.Credentials.from_service_account_info(
        info,
        scopes=['https://www.googleapis.com/auth/spreadsheets'],
    )
    service = build('sheets', 'v4', credentials=creds)
    return service, sheet_id
|
31 |
+
|
32 |
+
def col_letter(col_num):
    """Convert a 1-based column number to its spreadsheet letter.

    1 -> 'A', 26 -> 'Z', 27 -> 'AA' (base-26 with no zero digit).
    """
    letters = []
    while col_num > 0:
        col_num, rem = divmod(col_num - 1, 26)
        letters.append(chr(ord('A') + rem))
    return ''.join(reversed(letters))
|
39 |
+
|
40 |
+
def add_new_data(new_row, service, SPREADSHEET_ID, num_of_columns=5):
    """Append *new_row* (a list of cell values) below the existing data in Sheet1.

    new_row: list of cell values for one row.
    num_of_columns: width of the range read to locate the current last row.
    """
    # Read the occupied range first so the new row lands directly below it.
    range_to_read = f'Sheet1!A:{col_letter(num_of_columns)}'
    result = service.spreadsheets().values().get(
        spreadsheetId=SPREADSHEET_ID,
        range=range_to_read
    ).execute()
    number_of_rows = len(result.get('values', []))

    range_to_write = f'Sheet1!A{number_of_rows + 1}'
    # The API expects a list of rows, so wrap the single row — without
    # shadowing the parameter as the previous version did.
    request_body = {'values': [new_row]}
    service.spreadsheets().values().append(
        spreadsheetId=SPREADSHEET_ID,
        range=range_to_write,
        valueInputOption='RAW',
        insertDataOption='INSERT_ROWS',
        body=request_body
    ).execute()

    print(f"Added new row at position {number_of_rows + 1}")
|
67 |
+
|
68 |
+
|
69 |
+
|
70 |
+
# Manual smoke test: authenticate via a local .env file and append a dummy
# five-column row to the configured sheet.
if __name__ == "__main__":
    service, SHEET_ID = get_sheet_service(local=True)
    new_row = ["test1", "test2", "test3", "test4", "test5"]
    add_new_data(new_row, service, SHEET_ID)
|
74 |
+
|
utils.py
CHANGED
@@ -1,5 +1,12 @@
|
|
1 |
import openai
|
2 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
def get_user_input(prompt):
|
4 |
return input(prompt)
|
5 |
|
@@ -23,37 +30,37 @@ def describe_task():
|
|
23 |
task_description = "Write a poem about the moon in 3 lines."
|
24 |
return task_description
|
25 |
|
26 |
-
|
27 |
-
|
|
|
|
|
28 |
|
29 |
-
def generate_text_with_gpt(promts):
|
30 |
-
client = openai.OpenAI(api_key=OPEN_API_KEY)
|
31 |
-
|
32 |
try:
|
33 |
-
response =
|
34 |
-
|
35 |
-
|
36 |
-
|
|
|
37 |
]
|
38 |
)
|
39 |
-
return response
|
40 |
except Exception as e:
|
41 |
-
print(f"Error
|
42 |
return ""
|
43 |
|
44 |
-
def merge_texts_parallel(task_description, human_text, ai_text):
|
45 |
prompt = f"Given the task as :{task_description}, there are two answers provided:\n" + \
|
46 |
f"The first answer: {human_text}\nThe second answer: {ai_text}\n" + \
|
47 |
f"Merge the two answers into one in a coherent way: "
|
48 |
-
return generate_text_with_gpt(prompt)
|
49 |
|
50 |
-
def merge_texts_sequential(task_description, human_text):
|
51 |
prompt = f"Given the task as :{task_description}, the human answer is: {human_text}\n" + \
|
52 |
f"Provide an answer of your own but make sure it is coherent and should be based on the human answer: "
|
53 |
-
return generate_text_with_gpt(prompt)
|
54 |
|
55 |
-
def
|
56 |
prompt = f"Given the task as :{task_description}, the answer provided is: {text}\n" + \
|
57 |
f"Evaluate the answer and provide scores between 0 and 10,\n" + \
|
58 |
f"where criteria for evaluation are correctness, relevance, novelty, fluency, and aesthetic Value:"
|
59 |
-
return generate_text_with_gpt(prompt)
|
|
|
1 |
import openai
|
2 |
+
import os
|
3 |
+
|
4 |
+
def get_api_key(local=False):
    """Return the OpenAI key from the OPEN_API_KEY environment variable.

    With local=True, a .env file is loaded into the environment first.
    Returns None if the variable is unset.
    """
    if local:
        from dotenv import load_dotenv
        load_dotenv()
    return os.getenv('OPEN_API_KEY')
|
9 |
+
|
10 |
def get_user_input(prompt):
|
11 |
return input(prompt)
|
12 |
|
|
|
30 |
task_description = "Write a poem about the moon in 3 lines."
|
31 |
return task_description
|
32 |
|
33 |
+
def generate_text_with_gpt(prompts, api_key=None):
    """Send *prompts* to gpt-3.5-turbo and return the reply text.

    api_key, when given, is used for this call; otherwise the client falls
    back to the OPENAI_API_KEY environment variable. Returns "" on any
    API error (logged to stdout), matching the previous contract.
    """
    try:
        # openai>=1.0 removed the module-level ChatCompletion API (and
        # requirements.txt does not pin an older version); use the v1
        # client object, as the earlier revision of this file already did.
        client = openai.OpenAI(api_key=api_key) if api_key else openai.OpenAI()
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "Please assist."},
                {"role": "user", "content": prompts}
            ]
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"Error occurred when generating texts: {e}")
        return ""
|
50 |
|
51 |
+
def merge_texts_parallel(task_description, human_text, ai_text, api_key=None):
    """Ask GPT to fuse a human answer and an AI answer into one coherent text."""
    prompt = (
        f"Given the task as :{task_description}, there are two answers provided:\n"
        f"The first answer: {human_text}\nThe second answer: {ai_text}\n"
        f"Merge the two answers into one in a coherent way: "
    )
    return generate_text_with_gpt(prompt, api_key)
|
56 |
|
57 |
+
def merge_texts_sequential(task_description, human_text, api_key=None):
    """Ask GPT to continue from the human answer, producing one combined text."""
    prompt = (
        f"Given the task as :{task_description}, the human answer is: {human_text}\n"
        f"Provide an answer of your own but make sure it is coherent and should be based on the human answer: "
    )
    return generate_text_with_gpt(prompt, api_key)
|
61 |
|
62 |
+
def get_evaluation_with_gpt(task_description, text, api_key=None):
    """Ask GPT to score *text* (0-10) on correctness, relevance, novelty,
    fluency, and aesthetic value for the given task."""
    prompt = (
        f"Given the task as :{task_description}, the answer provided is: {text}\n"
        f"Evaluate the answer and provide scores between 0 and 10,\n"
        f"where criteria for evaluation are correctness, relevance, novelty, fluency, and aesthetic Value:"
    )
    return generate_text_with_gpt(prompt, api_key)
|