import gradio as gr
import pandas as pd
import requests
from bs4 import BeautifulSoup
from docx import Document
import os
from openai import OpenAI
import json
from youtube_transcript_api import YouTubeTranscriptApi
OPEN_AI_KEY = os.getenv("OPEN_AI_KEY")
client = OpenAI(api_key=OPEN_AI_KEY)
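# App flow (see the handlers below): the user uploads a CSV/XLSX/DOCX file, or enters a
# YouTube or web-page link; the content is flattened to plain text, used to generate three
# suggested questions plus a summary via the OpenAI API, and then serves as the data
# context for the chat tab.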
def process_file(file):
    # Read the uploaded file according to its extension
    if file.name.endswith('.csv'):
        df = pd.read_csv(file)
        text = df_to_text(df)
    elif file.name.endswith('.xlsx'):
        df = pd.read_excel(file)
        text = df_to_text(df)
    elif file.name.endswith('.docx'):
        text = docx_to_text(file)
    else:
        raise ValueError("Unsupported file type")

    # Replace the @XX@ separator with |
    df_string = text.replace("@XX@", "|")

    # Generate suggested questions and a summary from the uploaded content
    questions = generate_questions(df_string)
    df_summarise = generate_df_summarise(df_string)

    # Return the three button labels, the summary, and the raw text
    return questions[0] if len(questions) > 0 else "", \
           questions[1] if len(questions) > 1 else "", \
           questions[2] if len(questions) > 2 else "", \
           df_summarise, \
           df_string
def df_to_text(df):
    # Convert a DataFrame to plain text
    return df.to_string()
def docx_to_text(file):
    # Convert a Word document to plain text
    doc = Document(file)
    return "\n".join([para.text for para in doc.paragraphs])
def process_youtube_link(link):
    # Fetch the transcript for the video via the YouTube transcript API
    video_id = link.split("=")[-1]
    transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=['zh-TW'])
    # Flatten the transcript segments into a single text string
    transcript_text = " ".join([entry["text"] for entry in transcript])

    # Generate the remaining outputs from the transcript text
    questions = generate_questions(transcript_text)
    df_summarise = generate_df_summarise(transcript_text)

    # Return outputs matching the UI components wired to this handler
    return questions[0] if len(questions) > 0 else "", \
           questions[1] if len(questions) > 1 else "", \
           questions[2] if len(questions) > 2 else "", \
           df_summarise, \
           transcript_text
def process_web_link(link):
    # Fetch and parse the web page, then produce the same five outputs as the other handlers
    response = requests.get(link)
    soup = BeautifulSoup(response.content, 'html.parser')
    web_text = soup.get_text()
    questions = generate_questions(web_text)
    df_summarise = generate_df_summarise(web_text)
    questions += [""] * max(0, 3 - len(questions))
    return questions[0], questions[1], questions[2], df_summarise, web_text
def generate_df_summarise(df_string):
    # Use OpenAI to generate a summary of the uploaded data
    sys_content = "你是一個資料分析師,服務對象為老師,請精讀資料,使用 zh-TW"
    user_content = f"請根據 {df_string},大概描述這張表的欄位敘述、資料樣態與資料分析,告訴老師這張表的意義,以及可能的結論與對應方式"
    messages = [
        {"role": "system", "content": sys_content},
        {"role": "user", "content": user_content}
    ]
    print("=====messages=====")
    print(messages)
    print("=====messages=====")

    request_payload = {
        "model": "gpt-4-1106-preview",
        "messages": messages,
        "max_tokens": 4000,
    }

    response = client.chat.completions.create(**request_payload)
    df_summarise = response.choices[0].message.content.strip()
    print("=====df_summarise=====")
    print(df_summarise)
    print("=====df_summarise=====")
    return df_summarise
def generate_questions(df_string):
    # Use OpenAI to generate suggested questions based on the uploaded data
    sys_content = "你是一個資料分析師,user為老師,請精讀資料,並用既有資料為本質猜測用戶可能會問的問題,使用 zh-TW"
    user_content = f"請根據 {df_string} 生成三個問題,並用 JSON 格式返回 questions:[q1, q2, q3]"
    messages = [
        {"role": "system", "content": sys_content},
        {"role": "user", "content": user_content}
    ]
    response_format = { "type": "json_object" }
    print("=====messages=====")
    print(messages)
    print("=====messages=====")

    request_payload = {
        "model": "gpt-4-1106-preview",
        "messages": messages,
        "max_tokens": 4000,
        "response_format": response_format
    }

    response = client.chat.completions.create(**request_payload)
    questions = json.loads(response.choices[0].message.content)["questions"]
    print("=====json_response=====")
    print(questions)
    print("=====json_response=====")
    return questions
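# Expected shape of the model's JSON reply, per the prompt and response_format above, e.g.:
# {"questions": ["問題一", "問題二", "問題三"]}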
def send_question(question, df_string_output, chat_history):
    # Called when a suggested-question button is clicked
    return respond(question, df_string_output, chat_history)
def respond(user_message, df_string_output, chat_history):
    print("=== variable: user_message ===")
    print(user_message)
    print("=== variable: chat_history ===")
    print(chat_history)

    sys_content = f"你是一個資料分析師,請用 {df_string_output} 為資料進行對話,使用 zh-TW"
    messages = [
        {"role": "system", "content": sys_content},
        {"role": "user", "content": user_message}
    ]

    print("=====messages=====")
    print(messages)
    print("=====messages=====")

    request_payload = {
        "model": "gpt-4-1106-preview",
        "messages": messages,
        "max_tokens": 4000  # A generous upper bound; adjust as needed
    }

    response = client.chat.completions.create(**request_payload)
    print(response)
    response_text = response.choices[0].message.content.strip()

    # Append the new exchange to the chat history
    new_chat_history = (user_message, response_text)
    if chat_history is None:
        chat_history = [new_chat_history]
    else:
        chat_history.append(new_chat_history)

    # Return an empty string to clear the input box, plus the updated history
    return "", chat_history
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            file_upload = gr.File(label="Upload your CSV or Word file")
            youtube_link = gr.Textbox(label="Enter YouTube Link")
            web_link = gr.Textbox(label="Enter Web Page Link")
            chatbot = gr.Chatbot()
            msg = gr.Textbox(label="Message")
            send_button = gr.Button("Send")

        with gr.Column():
            with gr.Tab("資料本文"):
                df_string_output = gr.Textbox()
            with gr.Tab("資料摘要"):
                gr.Markdown("## 這是什麼樣的資料?")
                df_summarise = gr.Textbox(container=True, show_copy_button=True, label="資料本文", lines=40)
            with gr.Tab("常用問題"):
                gr.Markdown("## 常用問題")
                btn_1 = gr.Button()
                btn_2 = gr.Button()
                btn_3 = gr.Button()

    send_button.click(
        respond,
        inputs=[msg, df_string_output, chatbot],
        outputs=[msg, chatbot]
    )

    # Wire the suggested-question buttons; each button passes its current label as the user question
    btn_1.click(respond, inputs=[btn_1, df_string_output, chatbot], outputs=[msg, chatbot])
    btn_2.click(respond, inputs=[btn_2, df_string_output, chatbot], outputs=[msg, chatbot])
    btn_3.click(respond, inputs=[btn_3, df_string_output, chatbot], outputs=[msg, chatbot])

    # file_upload.change(process_file, inputs=file_upload, outputs=df_string_output)
    file_upload.change(process_file, inputs=file_upload, outputs=[btn_1, btn_2, btn_3, df_summarise, df_string_output])

    # Triggered when a YouTube link is entered
    youtube_link.change(process_youtube_link, inputs=youtube_link, outputs=[btn_1, btn_2, btn_3, df_summarise, df_string_output])

    # Triggered when a web page link is entered
    web_link.change(process_web_link, inputs=web_link, outputs=[btn_1, btn_2, btn_3, df_summarise, df_string_output])

demo.launch()
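# To run this Space locally (a minimal sketch; the package names are the usual PyPI ones
# and may need version pinning): set the OPEN_AI_KEY environment variable, then
#   pip install gradio pandas openpyxl python-docx requests beautifulsoup4 openai youtube-transcript-api
#   python app.py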