Spaces:
Sleeping
Sleeping
update
Browse files- app.py +512 -419
- chatbot.py +12 -10
- local_config_example.json +20 -11
app.py
CHANGED
@@ -45,7 +45,7 @@ print(f"is_env_local: {is_env_local}")
|
|
45 |
print("===gr__version__===")
|
46 |
print(gr.__version__)
|
47 |
|
48 |
-
|
49 |
if is_env_local:
|
50 |
with open("local_config.json") as f:
|
51 |
config = json.load(f)
|
@@ -53,6 +53,11 @@ if is_env_local:
|
|
53 |
GCS_KEY = json.dumps(config["GOOGLE_APPLICATION_CREDENTIALS_JSON"])
|
54 |
DRIVE_KEY = json.dumps(config["GOOGLE_APPLICATION_CREDENTIALS_JSON"])
|
55 |
OPEN_AI_KEY = config["OPEN_AI_KEY"]
|
|
|
|
|
|
|
|
|
|
|
56 |
GROQ_API_KEY = config["GROQ_API_KEY"]
|
57 |
JUTOR_CHAT_KEY = config["JUTOR_CHAT_KEY"]
|
58 |
AWS_ACCESS_KEY = config["AWS_ACCESS_KEY"]
|
@@ -64,6 +69,11 @@ else:
|
|
64 |
GCS_KEY = os.getenv("GOOGLE_APPLICATION_CREDENTIALS_JSON")
|
65 |
DRIVE_KEY = os.getenv("GOOGLE_APPLICATION_CREDENTIALS_JSON")
|
66 |
OPEN_AI_KEY = os.getenv("OPEN_AI_KEY")
|
|
|
|
|
|
|
|
|
|
|
67 |
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
|
68 |
JUTOR_CHAT_KEY = os.getenv("JUTOR_CHAT_KEY")
|
69 |
AWS_ACCESS_KEY = os.getenv("AWS_ACCESS_KEY")
|
@@ -73,8 +83,9 @@ else:
|
|
73 |
|
74 |
TRANSCRIPTS = []
|
75 |
CURRENT_INDEX = 0
|
|
|
76 |
|
77 |
-
|
78 |
GROQ_CLIENT = Groq(api_key=GROQ_API_KEY)
|
79 |
GCS_SERVICE = GoogleCloudStorage(GCS_KEY)
|
80 |
GCS_CLIENT = GCS_SERVICE.client
|
@@ -85,6 +96,39 @@ BEDROCK_CLIENT = boto3.client(
|
|
85 |
region_name=AWS_REGION_NAME,
|
86 |
)
|
87 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
88 |
# 驗證 password
|
89 |
def verify_password(password):
|
90 |
if password == PASSWORD:
|
@@ -371,7 +415,14 @@ def generate_transcription_by_whisper(video_id):
|
|
371 |
|
372 |
def get_video_duration(video_id):
|
373 |
yt = YouTube(f'https://www.youtube.com/watch?v={video_id}')
|
374 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
375 |
|
376 |
def process_transcript_and_screenshots_on_gcs(video_id):
|
377 |
print("====process_transcript_and_screenshots_on_gcs====")
|
@@ -384,7 +435,7 @@ def process_transcript_and_screenshots_on_gcs(video_id):
|
|
384 |
# 检查逐字稿是否存在
|
385 |
is_new_transcript = False
|
386 |
is_transcript_exists = GCS_SERVICE.check_file_exists(bucket_name, transcript_blob_name)
|
387 |
-
|
388 |
if not is_transcript_exists:
|
389 |
print("逐字稿文件不存在于GCS中,重新建立")
|
390 |
# 从YouTube获取逐字稿并上传
|
@@ -400,8 +451,9 @@ def process_transcript_and_screenshots_on_gcs(video_id):
|
|
400 |
else:
|
401 |
print("沒有找到字幕")
|
402 |
transcript = generate_transcription_by_whisper(video_id)
|
403 |
-
|
404 |
-
|
|
|
405 |
transcript_text = json.dumps(transcript, ensure_ascii=False, indent=2)
|
406 |
GCS_SERVICE.upload_json_string(bucket_name, transcript_blob_name, transcript_text)
|
407 |
|
@@ -411,7 +463,8 @@ def process_transcript_and_screenshots_on_gcs(video_id):
|
|
411 |
print("逐字稿已存在于GCS中")
|
412 |
transcript_text = GCS_SERVICE.download_as_string(bucket_name, transcript_blob_name)
|
413 |
transcript = json.loads(transcript_text)
|
414 |
-
|
|
|
415 |
|
416 |
# print("===確認其他衍生文件===")
|
417 |
# source = "gcs"
|
@@ -605,13 +658,22 @@ def download_youtube_video(youtube_id, output_path=OUTPUT_PATH):
|
|
605 |
# Create the output directory if it doesn't exist
|
606 |
if not os.path.exists(output_path):
|
607 |
os.makedirs(output_path)
|
608 |
-
|
609 |
# Download the video
|
610 |
-
|
611 |
-
|
612 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
613 |
|
614 |
-
|
615 |
|
616 |
def screenshot_youtube_video(youtube_id, snapshot_sec):
|
617 |
video_path = f'{OUTPUT_PATH}/{youtube_id}.mp4'
|
@@ -1208,7 +1270,6 @@ def change_questions(password, df_string):
|
|
1208 |
print("=====get_questions=====")
|
1209 |
return q1, q2, q3
|
1210 |
|
1211 |
-
# 「關鍵時刻」另外獨立成一個 tab,時間戳記和文字的下方附上對應的截圖,重點摘要的「關鍵時刻」加上截圖資訊
|
1212 |
def get_key_moments(video_id, formatted_simple_transcript, formatted_transcript, source):
|
1213 |
if source == "gcs":
|
1214 |
print("===get_key_moments on gcs===")
|
@@ -1277,19 +1338,19 @@ def generate_key_moments(formatted_simple_transcript, formatted_transcript):
|
|
1277 |
1. 小範圍切出不同段落的相對應時間軸的重點摘要,
|
1278 |
2. 每一小段最多不超過 1/5 的總內容,也就是大約 3~5段的重點(例如五~十分鐘的影片就一段大約1~2分鐘,最多三分鐘,但如果是超過十分鐘的影片,那一小段大約 2~3分鐘,以此類推)
|
1279 |
3. 注意不要遺漏任何一段時間軸的內容 從零秒開始
|
1280 |
-
4.
|
1281 |
-
5.
|
1282 |
-
以這種方式分析整個文本,從零秒開始分析,直到結束。這很重要
|
1283 |
6. 關鍵字從transcript extract to keyword,保留專家名字、專業術語、年份、數字、期刊名稱、地名、數學公式
|
1284 |
-
7. text,
|
1285 |
|
1286 |
-
|
|
|
1287 |
"start": "00:00",
|
1288 |
"end": "01:00",
|
1289 |
"text": "逐字稿的重點摘要",
|
1290 |
-
"transcript": "逐字稿的集合(要有合理的標點符號),要完整跟原來的一樣,不要省略",
|
1291 |
"keywords": ["關鍵字", "關鍵字"]
|
1292 |
-
|
|
|
1293 |
"""
|
1294 |
|
1295 |
try:
|
@@ -1337,8 +1398,18 @@ def generate_key_moments(formatted_simple_transcript, formatted_transcript):
|
|
1337 |
response = BEDROCK_CLIENT.invoke_model(**kwargs)
|
1338 |
response_body = json.loads(response.get('body').read())
|
1339 |
response_completion = response_body.get('content')[0].get('text')
|
|
|
|
|
1340 |
key_moments = json.loads(response_completion)["key_moments"]
|
1341 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1342 |
print("=====key_moments=====")
|
1343 |
print(key_moments)
|
1344 |
print("=====key_moments=====")
|
@@ -2011,17 +2082,22 @@ def download_exam_result(content):
|
|
2011 |
return word_path
|
2012 |
|
2013 |
# ---- Chatbot ----
|
2014 |
-
def get_instructions(content_subject, content_grade, key_moments):
|
|
|
|
|
|
|
|
|
|
|
2015 |
instructions = f"""
|
2016 |
subject: {content_subject}
|
2017 |
grade: {content_grade}
|
2018 |
context: {key_moments}
|
2019 |
Assistant Role: you are a {content_subject} assistant. you can call yourself as {content_subject} 學伴
|
2020 |
User Role: {content_grade} th-grade student.
|
2021 |
-
Method:
|
2022 |
Language: Traditional Chinese ZH-TW (it's very important), suitable for {content_grade} th-grade level.
|
2023 |
Response:
|
2024 |
-
- if user say hi or hello or any greeting, just say hi back and introduce yourself. Then
|
2025 |
- Single question, under 100 characters
|
2026 |
- include math symbols (use LaTeX $ to cover before and after, ex: $x^2$)
|
2027 |
- hint with video timestamp which format 【參考:00:00:00】.
|
@@ -2032,48 +2108,66 @@ def get_instructions(content_subject, content_grade, key_moments):
|
|
2032 |
"""
|
2033 |
return instructions
|
2034 |
|
2035 |
-
def
|
|
|
|
|
2036 |
verify_password(password)
|
|
|
2037 |
|
2038 |
-
|
2039 |
-
|
|
|
|
|
|
|
2040 |
|
2041 |
-
|
2042 |
-
|
2043 |
-
|
2044 |
-
answer = qa["answer"]
|
2045 |
-
if user_message == question and answer != "":
|
2046 |
-
print("=== in questions_answers_json==")
|
2047 |
-
print(f"question: {question}")
|
2048 |
-
print(f"answer: {answer}")
|
2049 |
-
# 更新聊天历史
|
2050 |
-
new_chat_history = (user_message, answer)
|
2051 |
-
if chat_history is None:
|
2052 |
-
chat_history = [new_chat_history]
|
2053 |
-
else:
|
2054 |
-
chat_history.append(new_chat_history)
|
2055 |
|
2056 |
-
|
2057 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2058 |
|
2059 |
-
|
|
|
|
|
2060 |
|
2061 |
-
|
2062 |
-
|
2063 |
-
raise gr.Error(error_msg)
|
2064 |
|
|
|
2065 |
if not ai_name in ["foxcat", "lili", "maimai"]:
|
2066 |
ai_name = "foxcat"
|
2067 |
|
2068 |
-
# if ai_name == "jutor":
|
2069 |
-
# ai_client = ""
|
2070 |
-
# elif ai_name == "claude3":
|
2071 |
-
# ai_client = BEDROCK_CLIENT
|
2072 |
-
# elif ai_name == "groq":
|
2073 |
-
# ai_client = GROQ_CLIENT
|
2074 |
-
# else:
|
2075 |
-
# ai_client = ""
|
2076 |
-
|
2077 |
ai_name_clients_model = {
|
2078 |
"foxcat": {
|
2079 |
"ai_name": "foxcat",
|
@@ -2085,11 +2179,6 @@ def chat_with_ai(ai_name, password, video_id, user_data, trascript_state, key_mo
|
|
2085 |
"ai_client": BEDROCK_CLIENT,
|
2086 |
"ai_model_name": "claude3",
|
2087 |
},
|
2088 |
-
# "maimai": {
|
2089 |
-
# "ai_name": "maimai",
|
2090 |
-
# "ai_client": OPEN_AI_CLIENT,
|
2091 |
-
# "ai_model_name": "openai",
|
2092 |
-
# }
|
2093 |
"maimai": {
|
2094 |
"ai_name": "maimai",
|
2095 |
"ai_client": GROQ_CLIENT,
|
@@ -2099,10 +2188,10 @@ def chat_with_ai(ai_name, password, video_id, user_data, trascript_state, key_mo
|
|
2099 |
ai_client = ai_name_clients_model.get(ai_name, "foxcat")["ai_client"]
|
2100 |
ai_model_name = ai_name_clients_model.get(ai_name, "foxcat")["ai_model_name"]
|
2101 |
|
2102 |
-
if isinstance(
|
2103 |
-
simple_transcript = json.loads(
|
2104 |
else:
|
2105 |
-
simple_transcript =
|
2106 |
|
2107 |
if isinstance(key_moments, str):
|
2108 |
key_moments_json = json.loads(key_moments)
|
@@ -2115,7 +2204,7 @@ def chat_with_ai(ai_name, password, video_id, user_data, trascript_state, key_mo
|
|
2115 |
moment.pop('transcript', None)
|
2116 |
key_moments_text = json.dumps(key_moments_json, ensure_ascii=False)
|
2117 |
|
2118 |
-
instructions = get_instructions(content_subject, content_grade, key_moments_text)
|
2119 |
|
2120 |
chatbot_config = {
|
2121 |
"video_id": video_id,
|
@@ -2129,155 +2218,185 @@ def chat_with_ai(ai_name, password, video_id, user_data, trascript_state, key_mo
|
|
2129 |
"instructions": instructions
|
2130 |
}
|
2131 |
|
2132 |
-
|
2133 |
-
|
2134 |
-
|
2135 |
-
|
2136 |
-
|
2137 |
-
|
2138 |
-
|
2139 |
-
|
2140 |
-
|
2141 |
-
|
2142 |
-
|
2143 |
-
|
2144 |
-
|
2145 |
-
|
2146 |
-
|
2147 |
-
|
2148 |
-
|
2149 |
-
|
2150 |
-
|
2151 |
-
|
2152 |
-
|
2153 |
-
|
2154 |
-
def chat_with_opan_ai_assistant(password, youtube_id, user_data, thread_id, trascript_state, key_moments, user_message, chat_history, content_subject, content_grade, questions_answers_json, socratic_mode=False):
|
2155 |
-
verify_password(password)
|
2156 |
-
|
2157 |
-
print("=====user_data=====")
|
2158 |
-
print(f"user_data: {user_data}")
|
2159 |
-
|
2160 |
-
# 先計算 user_message 是否超過 500 個字
|
2161 |
-
if len(user_message) > 1500:
|
2162 |
-
error_msg = "你的訊息太長了,請縮短訊息長度至五百��以內"
|
2163 |
-
raise gr.Error(error_msg)
|
2164 |
|
2165 |
-
|
2166 |
-
|
2167 |
-
|
2168 |
-
|
2169 |
-
|
2170 |
-
|
2171 |
-
|
2172 |
-
|
2173 |
-
|
2174 |
-
|
2175 |
-
|
2176 |
-
|
2177 |
-
|
2178 |
-
|
2179 |
-
|
2180 |
-
|
2181 |
-
# 等待 3 秒
|
2182 |
-
time.sleep(3)
|
2183 |
|
2184 |
-
|
2185 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2186 |
|
2187 |
-
|
2188 |
-
|
2189 |
-
error_msg = "此次對話超過上限(對話一輪10次)"
|
2190 |
-
raise gr.Error(error_msg)
|
2191 |
|
2192 |
-
|
2193 |
-
assistant_id = "asst_Mk151eZmKhNxzG7L9Awqz6iZ" #GPT 4 turbo
|
2194 |
-
# assistant_id = "asst_sCA7F5opi2g7AvGnYeRfoSfT" #GPT 3.5 turbo
|
2195 |
-
|
2196 |
-
client = OPEN_AI_CLIENT
|
2197 |
-
# 直接安排逐字稿資料 in instructions
|
2198 |
-
# if isinstance(trascript_state, str):
|
2199 |
-
# trascript_json = json.loads(trascript_state)
|
2200 |
-
# else:
|
2201 |
-
# trascript_json = trascript_state
|
2202 |
-
# # 移除 embed_url, screenshot_path
|
2203 |
-
# for entry in trascript_json:
|
2204 |
-
# entry.pop('end_time', None)
|
2205 |
-
# trascript_text = json.dumps(trascript_json, ensure_ascii=False)
|
2206 |
|
2207 |
-
|
2208 |
-
|
2209 |
-
|
2210 |
-
|
2211 |
-
|
2212 |
-
|
2213 |
-
|
2214 |
-
|
2215 |
-
|
2216 |
-
|
2217 |
-
|
2218 |
-
instructions = get_instructions(content_subject, content_grade, key_moments_text)
|
2219 |
-
print("=== instructions ===")
|
2220 |
-
print(instructions)
|
2221 |
|
2222 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2223 |
if not thread_id:
|
2224 |
-
thread = client.beta.threads.create(
|
2225 |
-
|
2226 |
-
)
|
2227 |
thread_id = thread.id
|
2228 |
else:
|
2229 |
thread = client.beta.threads.retrieve(thread_id)
|
2230 |
-
|
2231 |
-
# add meta data to thread
|
2232 |
-
client.beta.threads.update(
|
2233 |
-
thread_id=thread_id,
|
2234 |
-
metadata={
|
2235 |
-
"youtube_id": youtube_id,
|
2236 |
-
"user_data": user_data,
|
2237 |
-
"content_subject": content_subject,
|
2238 |
-
"content_grade": content_grade,
|
2239 |
-
"socratic_mode": socratic_mode,
|
2240 |
-
"assistant_id": assistant_id,
|
2241 |
-
"is_streaming": "false",
|
2242 |
-
}
|
2243 |
-
)
|
2244 |
|
2245 |
-
|
2246 |
-
|
2247 |
-
thread_id=thread.id,
|
2248 |
-
role="user",
|
2249 |
-
content=user_message + "/n 請嚴格遵循instructions,擔任一位蘇格拉底家教,絕對不要重複 user 的問句,請用引導的方式指引方向,請一定要用繁體中文回答 zh-TW,並用台灣人的禮貌口語表達,回答時不要特別說明這是台灣人的語氣,請在回答的最後標註【參考:(時):(分):(秒)】,(如果是反問學生,就只問一個問題,請幫助學生更好的理解資料,字數在100字以內,回答時如果講到數學專有名詞,請用數學符號代替文字(Latex 用 $ 字號 render, ex: $x^2$)"
|
2250 |
-
)
|
2251 |
|
2252 |
-
#
|
2253 |
-
|
2254 |
-
thread_id=thread.id,
|
2255 |
-
assistant_id=assistant_id,
|
2256 |
-
instructions=instructions,
|
2257 |
-
)
|
2258 |
|
2259 |
-
#
|
|
|
|
|
|
|
2260 |
run_status = poll_run_status(run.id, thread.id, timeout=30)
|
2261 |
-
|
2262 |
if run_status == "completed":
|
2263 |
messages = client.beta.threads.messages.list(thread_id=thread.id)
|
2264 |
-
# [MessageContentText(text=Text(annotations=[], value='您好!有什麼我可以幫助您的嗎?如果有任何問題或需要指導,請隨時告訴我!'), type='text')]
|
2265 |
response_text = messages.data[0].content[0].text.value
|
2266 |
else:
|
2267 |
response_text = "學習精靈有點累,請稍後再試!"
|
2268 |
|
2269 |
-
# 更新聊天历史
|
2270 |
-
new_chat_history = (user_message, response_text)
|
2271 |
-
if chat_history is None:
|
2272 |
-
chat_history = [new_chat_history]
|
2273 |
-
else:
|
2274 |
-
chat_history.append(new_chat_history)
|
2275 |
except Exception as e:
|
2276 |
-
|
2277 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2278 |
|
2279 |
-
|
2280 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2281 |
|
2282 |
def process_open_ai_audio_to_chatbot(password, audio_url):
|
2283 |
verify_password(password)
|
@@ -2287,15 +2406,37 @@ def process_open_ai_audio_to_chatbot(password, audio_url):
|
|
2287 |
if file_size > 2000000:
|
2288 |
raise gr.Error("檔案大小超過,請不要超過 60秒")
|
2289 |
else:
|
2290 |
-
|
2291 |
model="whisper-1",
|
2292 |
file=audio_file,
|
2293 |
response_format="text"
|
2294 |
)
|
2295 |
# response 拆解 dict
|
2296 |
-
print("===
|
2297 |
-
print(
|
2298 |
-
print("===
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2299 |
else:
|
2300 |
response = ""
|
2301 |
|
@@ -2350,7 +2491,7 @@ def poll_run_status(run_id, thread_id, timeout=600, poll_interval=5):
|
|
2350 |
|
2351 |
return run.status
|
2352 |
|
2353 |
-
def chat_with_opan_ai_assistant_streaming(user_message, chat_history, password, video_id, user_data, thread_id, trascript, key_moments, content_subject, content_grade):
|
2354 |
verify_password(password)
|
2355 |
|
2356 |
print("=====user_data=====")
|
@@ -2365,13 +2506,14 @@ def chat_with_opan_ai_assistant_streaming(user_message, chat_history, password,
|
|
2365 |
raise gr.Error(error_msg)
|
2366 |
|
2367 |
# 如果 chat_history 超過 10 則訊息,直接 return "對話超過上限"
|
2368 |
-
if chat_history is not None and len(chat_history) >
|
2369 |
-
error_msg = "此次對話超過上限(對話一輪
|
2370 |
raise gr.Error(error_msg)
|
2371 |
|
2372 |
try:
|
2373 |
-
assistant_id =
|
2374 |
-
# assistant_id =
|
|
|
2375 |
client = OPEN_AI_CLIENT
|
2376 |
# 直接安排逐字稿資料 in instructions
|
2377 |
# if isinstance(trascript, str):
|
@@ -2393,7 +2535,7 @@ def chat_with_opan_ai_assistant_streaming(user_message, chat_history, password,
|
|
2393 |
moment.pop('transcript', None)
|
2394 |
key_moments_text = json.dumps(key_moments_json, ensure_ascii=False)
|
2395 |
|
2396 |
-
instructions = get_instructions(content_subject, content_grade, key_moments_text)
|
2397 |
# 创建线程
|
2398 |
if not thread_id:
|
2399 |
thread = client.beta.threads.create()
|
@@ -2447,25 +2589,31 @@ def create_thread_id():
|
|
2447 |
def chatbot_select(chatbot_name):
|
2448 |
chatbot_select_accordion_visible = gr.update(visible=False)
|
2449 |
all_chatbot_select_btn_visible = gr.update(visible=True)
|
2450 |
-
chatbot_open_ai_visible = gr.update(visible=False)
|
2451 |
chatbot_open_ai_streaming_visible = gr.update(visible=False)
|
2452 |
-
|
2453 |
ai_name_update = gr.update(value="foxcat")
|
2454 |
-
|
|
|
2455 |
if chatbot_name == "chatbot_open_ai":
|
2456 |
-
|
|
|
2457 |
elif chatbot_name == "chatbot_open_ai_streaming":
|
2458 |
chatbot_open_ai_streaming_visible = gr.update(visible=True)
|
|
|
2459 |
else:
|
2460 |
-
|
2461 |
-
|
|
|
|
|
2462 |
|
2463 |
-
return chatbot_select_accordion_visible, all_chatbot_select_btn_visible,
|
|
|
|
|
2464 |
|
2465 |
-
def update_avatar_images(avatar_images,
|
2466 |
value = [[
|
2467 |
"請問你是誰?",
|
2468 |
-
|
2469 |
]]
|
2470 |
ai_chatbot_update = gr.update(avatar_images=avatar_images, value=value)
|
2471 |
return ai_chatbot_update
|
@@ -2523,9 +2671,9 @@ def init_params(text, request: gr.Request):
|
|
2523 |
lesson_plan_accordion = gr.update(visible=True)
|
2524 |
exit_ticket_accordion = gr.update(visible=True)
|
2525 |
|
2526 |
-
chatbot_open_ai = gr.update(visible=False)
|
2527 |
chatbot_open_ai_streaming = gr.update(visible=False)
|
2528 |
-
|
|
|
2529 |
|
2530 |
# if youtube_link in query_params
|
2531 |
if "youtube_id" in request.query_params:
|
@@ -2544,11 +2692,12 @@ def init_params(text, request: gr.Request):
|
|
2544 |
worksheet_accordion = gr.update(visible=False)
|
2545 |
lesson_plan_accordion = gr.update(visible=False)
|
2546 |
exit_ticket_accordion = gr.update(visible=False)
|
|
|
2547 |
|
2548 |
return admin, reading_passage_admin, summary_admin, see_detail, \
|
2549 |
worksheet_accordion, lesson_plan_accordion, exit_ticket_accordion, \
|
2550 |
password_text, youtube_link, \
|
2551 |
-
|
2552 |
|
2553 |
def update_state(content_subject, content_grade, trascript, key_moments, questions_answers):
|
2554 |
# inputs=[content_subject, content_grade, df_string_output],
|
@@ -2565,16 +2714,12 @@ def update_state(content_subject, content_grade, trascript, key_moments, questio
|
|
2565 |
question_1 = questions_answers_json[0]["question"]
|
2566 |
question_2 = questions_answers_json[1]["question"]
|
2567 |
question_3 = questions_answers_json[2]["question"]
|
2568 |
-
btn_1 = question_1
|
2569 |
-
btn_2 = question_2
|
2570 |
-
btn_3 = question_3
|
2571 |
ai_chatbot_question_1 = question_1
|
2572 |
ai_chatbot_question_2 = question_2
|
2573 |
ai_chatbot_question_3 = question_3
|
2574 |
|
2575 |
return content_subject_state, content_grade_state, trascript_state, key_moments_state, \
|
2576 |
streaming_chat_thread_id_state, \
|
2577 |
-
btn_1, btn_2, btn_3, \
|
2578 |
ai_chatbot_question_1, ai_chatbot_question_2, ai_chatbot_question_3
|
2579 |
|
2580 |
|
@@ -2603,65 +2748,6 @@ HEAD = """
|
|
2603 |
});
|
2604 |
}
|
2605 |
</script>
|
2606 |
-
|
2607 |
-
<script>
|
2608 |
-
function changeImage(direction, count, galleryIndex) {
|
2609 |
-
// Find the current visible image by iterating over possible indices
|
2610 |
-
var currentImage = null;
|
2611 |
-
var currentIndex = -1;
|
2612 |
-
for (var i = 0; i < count; i++) {
|
2613 |
-
var img = document.querySelector('.slide-image-' + galleryIndex + '-' + i);
|
2614 |
-
if (img && img.style.display !== 'none') {
|
2615 |
-
currentImage = img;
|
2616 |
-
currentIndex = i;
|
2617 |
-
break;
|
2618 |
-
}
|
2619 |
-
}
|
2620 |
-
|
2621 |
-
// If no current image is visible, show the first one and return
|
2622 |
-
if (currentImage === null) {
|
2623 |
-
document.querySelector('.slide-image-' + galleryIndex + '-0').style.display = 'block';
|
2624 |
-
console.error('No current image found for galleryIndex ' + galleryIndex + ', defaulting to first image.');
|
2625 |
-
return;
|
2626 |
-
}
|
2627 |
-
|
2628 |
-
// Hide the current image
|
2629 |
-
currentImage.style.display = 'none';
|
2630 |
-
|
2631 |
-
// Calculate the index of the next image to show
|
2632 |
-
var newIndex = (currentIndex + direction + count) % count;
|
2633 |
-
|
2634 |
-
// Select the next image and show it
|
2635 |
-
var nextImage = document.querySelector('.slide-image-' + galleryIndex + '-' + newIndex);
|
2636 |
-
if (nextImage) {
|
2637 |
-
nextImage.style.display = 'block';
|
2638 |
-
} else {
|
2639 |
-
console.error('No image found for galleryIndex ' + galleryIndex + ' and newIndex ' + newIndex);
|
2640 |
-
}
|
2641 |
-
}
|
2642 |
-
</script>
|
2643 |
-
|
2644 |
-
<script>
|
2645 |
-
var selectButtons = document.querySelectorAll('.chatbot_select_btn');
|
2646 |
-
|
2647 |
-
// 为每个按钮添加点击事件监听器
|
2648 |
-
selectButtons.forEach(function(button) {
|
2649 |
-
button.addEventListener('click', function() {
|
2650 |
-
// 获取 #chatbot_select_accordion 下的第一个 button 元素
|
2651 |
-
var firstButton = document.querySelector('#chatbot_select_accordion button');
|
2652 |
-
var displayDiv = document.querySelector('#chatbot_select_accordion div:nth-child(3)');
|
2653 |
-
// 检查这个按钮是否存在
|
2654 |
-
if (firstButton) {
|
2655 |
-
// 移除 'open' 类
|
2656 |
-
firstButton.classList.remove('open');
|
2657 |
-
}
|
2658 |
-
if (displayDiv) {
|
2659 |
-
// display none
|
2660 |
-
displayDiv.style.display = 'none';
|
2661 |
-
}
|
2662 |
-
});
|
2663 |
-
});
|
2664 |
-
</script>
|
2665 |
"""
|
2666 |
|
2667 |
with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, secondary_hue=gr.themes.colors.amber, text_size = gr.themes.sizes.text_lg), head=HEAD) as demo:
|
@@ -2682,13 +2768,31 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, seconda
|
|
2682 |
with gr.Tab("AI小精靈"):
|
2683 |
with gr.Row():
|
2684 |
all_chatbot_select_btn = gr.Button("選擇 AI 小精靈 👈", elem_id="all_chatbot_select_btn", visible=False, variant="secondary", size="sm")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2685 |
with gr.Accordion("選擇 AI 小精靈", elem_id="chatbot_select_accordion") as chatbot_select_accordion:
|
2686 |
with gr.Row():
|
2687 |
user_avatar = "https://em-content.zobj.net/source/google/263/flushed-face_1f633.png"
|
2688 |
-
|
2689 |
-
with gr.Column(scale=1, variant="panel", visible=
|
2690 |
-
|
2691 |
-
|
|
|
2692 |
我可以陪你一起學習本次的內容,有什麼問題都可以問我喔!\n
|
2693 |
🤔 如果你不知道怎麼發問,可以點擊左下方的問題一、問題二、問題三,我會幫你生成問題!\n
|
2694 |
🗣️ 也可以點擊右下方用語音輸入,我會幫你轉換成文字,厲害吧!\n
|
@@ -2697,21 +2801,10 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, seconda
|
|
2697 |
🦄 如果達到上限,或是遇到精靈很累,請問問其他朋友,像是飛特音速說話的速度比較快,你是否跟得上呢?你也可以和其他精靈互動看看喔!\n
|
2698 |
"""
|
2699 |
chatbot_open_ai_name = gr.State("chatbot_open_ai")
|
2700 |
-
gr.Image(value=
|
2701 |
-
|
2702 |
-
gr.Markdown(value=
|
2703 |
-
|
2704 |
-
streaming_chatbot_avatar_url = "https://storage.googleapis.com/wpassets.junyiacademy.org/1/2020/11/1-%E6%98%9F%E7%A9%BA%E9%A0%AD%E8%B2%BC-%E5%A4%AA%E7%A9%BA%E7%8B%90%E7%8B%B8%E8%B2%93-150x150.png"
|
2705 |
-
streaming_chatbot_description = """Hi,我是【飛特音速】, \n
|
2706 |
-
說話比較快,但有什麼問題都可以問我喔! \n
|
2707 |
-
🚀 我沒有預設問題、也沒有語音輸入,適合快問快答,一起練習問出好問題吧 \n
|
2708 |
-
🔠 擅長用文字表達的你,可以用鍵盤輸入你的問題,我會盡力回答你的問題喔\n
|
2709 |
-
💤 我還在成長,體力有限,每一次學習只能回答十個問題,請讓我休息一下再問問題喔~
|
2710 |
-
"""
|
2711 |
-
chatbot_open_ai_streaming_name = gr.State("chatbot_open_ai_streaming")
|
2712 |
-
gr.Image(value=streaming_chatbot_avatar_url, height=100, width=100, show_label=False, show_download_button=False)
|
2713 |
-
chatbot_open_ai_streaming_select_btn = gr.Button("👆選擇【飛特音速】", elem_id="streaming_chatbot_btn", visible=True, variant="primary")
|
2714 |
-
gr.Markdown(value=streaming_chatbot_description, visible=True)
|
2715 |
with gr.Column(scale=1, variant="panel"):
|
2716 |
foxcat_chatbot_avatar_url = "https://storage.googleapis.com/wpassets.junyiacademy.org/1/2020/06/%E7%A7%91%E5%AD%B8%E5%BE%BD%E7%AB%A0-2-150x150.png"
|
2717 |
foxcat_avatar_images = gr.State([user_avatar, foxcat_chatbot_avatar_url])
|
@@ -2756,37 +2849,23 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, seconda
|
|
2756 |
gr.Image(value=maimai_chatbot_avatar_url, height=100, width=100, show_label=False, show_download_button=False)
|
2757 |
maimai_chatbot_select_btn = gr.Button("👆選擇【麥麥】", visible=True, variant="primary", elem_classes="chatbot_select_btn")
|
2758 |
maimai_chatbot_description_value = gr.Markdown(value=maimai_chatbot_description, visible=True)
|
2759 |
-
|
2760 |
-
|
2761 |
-
|
2762 |
-
|
2763 |
-
|
2764 |
-
|
2765 |
-
|
2766 |
-
|
2767 |
-
|
2768 |
-
|
2769 |
-
|
2770 |
-
|
2771 |
-
|
2772 |
-
|
2773 |
-
|
2774 |
-
|
2775 |
-
|
2776 |
-
with gr.Row():
|
2777 |
-
thread_id = gr.Textbox(label="thread_id", visible=False)
|
2778 |
-
socratic_mode_btn = gr.Checkbox(label="蘇格拉底家教助理模式", value=True, visible=False)
|
2779 |
-
with gr.Row():
|
2780 |
-
with gr.Accordion("你也有類似的問題想問嗎?", open=False) as ask_questions_accordion:
|
2781 |
-
btn_1 = gr.Button("問題一")
|
2782 |
-
btn_2 = gr.Button("問題一")
|
2783 |
-
btn_3 = gr.Button("問題一")
|
2784 |
-
gr.Markdown("### 重新生成問題")
|
2785 |
-
btn_create_question = gr.Button("生成其他問題", variant="primary")
|
2786 |
-
openai_chatbot_audio_input = gr.Audio(sources=["microphone"], type="filepath", max_length=60, label="語音輸入")
|
2787 |
-
with gr.Row():
|
2788 |
-
msg = gr.Textbox(label="訊息",scale=3)
|
2789 |
-
send_button = gr.Button("送出", variant="primary", scale=1)
|
2790 |
with gr.Row("飛特音速") as chatbot_open_ai_streaming:
|
2791 |
with gr.Column():
|
2792 |
streaming_chat_greeting = """
|
@@ -2795,7 +2874,7 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, seconda
|
|
2795 |
🔠 鍵盤輸入你的問題,我會盡力回答你的問題喔!\n
|
2796 |
💤 我還在成長,體力有限,每一次學習只能回答十個問題,請讓我休息一下再問問題喔!
|
2797 |
"""
|
2798 |
-
additional_inputs = [password, video_id, user_data, streaming_chat_thread_id_state, trascript_state, key_moments_state, content_subject_state, content_grade_state]
|
2799 |
streaming_chat = gr.ChatInterface(
|
2800 |
fn=chat_with_opan_ai_assistant_streaming,
|
2801 |
additional_inputs=additional_inputs,
|
@@ -2806,7 +2885,7 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, seconda
|
|
2806 |
stop_btn=None,
|
2807 |
description=streaming_chat_greeting
|
2808 |
)
|
2809 |
-
with gr.Row("
|
2810 |
with gr.Column():
|
2811 |
ai_chatbot_greeting = [[
|
2812 |
"請問你是誰?",
|
@@ -2817,27 +2896,19 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, seconda
|
|
2817 |
💤 精靈們體力都有限,每一次學習只能回答十個問題,請讓我休息一下再問問題喔!
|
2818 |
""",
|
2819 |
]]
|
2820 |
-
ai_name = gr.Dropdown(
|
2821 |
-
label="選擇 AI 助理",
|
2822 |
-
choices=[
|
2823 |
-
("梨梨","lili"),
|
2824 |
-
("麥麥","maimai"),
|
2825 |
-
("狐狸貓","foxcat")
|
2826 |
-
],
|
2827 |
-
value="foxcat",
|
2828 |
-
visible=False
|
2829 |
-
)
|
2830 |
-
ai_chatbot = gr.Chatbot(label="ai_chatbot", show_share_button=False, likeable=True, show_label=False, latex_delimiters=latex_delimiters, value=ai_chatbot_greeting)
|
2831 |
-
ai_chatbot_socratic_mode_btn = gr.Checkbox(label="蘇格拉底家教助理模式", value=True, visible=False)
|
2832 |
with gr.Row():
|
2833 |
-
|
|
|
|
|
2834 |
ai_chatbot_question_1 = gr.Button("問題一")
|
2835 |
ai_chatbot_question_2 = gr.Button("問題一")
|
2836 |
ai_chatbot_question_3 = gr.Button("問題一")
|
|
|
2837 |
ai_chatbot_audio_input = gr.Audio(sources=["microphone"], type="filepath", max_length=60, label="語音輸入")
|
2838 |
with gr.Row():
|
2839 |
ai_msg = gr.Textbox(label="訊息輸入",scale=3)
|
2840 |
-
ai_send_button = gr.Button("送出", variant="primary",scale=1)
|
|
|
2841 |
with gr.Tab("文章模式"):
|
2842 |
with gr.Row():
|
2843 |
reading_passage = gr.Markdown(show_label=False, latex_delimiters = [{"left": "$", "right": "$", "display": False}])
|
@@ -3021,15 +3092,74 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, seconda
|
|
3021 |
with gr.Tab("心智圖",elem_id="mind_map_tab"):
|
3022 |
mind_map_html = gr.HTML()
|
3023 |
|
3024 |
-
# --- Event ---
|
3025 |
-
chatbot_select_outputs=[chatbot_select_accordion, all_chatbot_select_btn, chatbot_open_ai, chatbot_open_ai_streaming, chatbot_jutor, ai_name]
|
3026 |
-
|
3027 |
# OPEN AI CHATBOT SELECT
|
3028 |
-
|
3029 |
-
|
3030 |
-
|
3031 |
-
|
3032 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3033 |
chatbot_open_ai_streaming_select_btn.click(
|
3034 |
chatbot_select,
|
3035 |
inputs=[chatbot_open_ai_streaming_name],
|
@@ -3039,55 +3169,13 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, seconda
|
|
3039 |
inputs=[],
|
3040 |
outputs=[streaming_chat_thread_id_state]
|
3041 |
)
|
3042 |
-
|
3043 |
-
chatbot_select,
|
3044 |
-
inputs=[foxcat_chatbot_name],
|
3045 |
-
outputs=chatbot_select_outputs
|
3046 |
-
).then(
|
3047 |
-
update_avatar_images,
|
3048 |
-
inputs=[foxcat_avatar_images, foxcat_chatbot_description_value],
|
3049 |
-
outputs=[ai_chatbot],
|
3050 |
-
scroll_to_output=True
|
3051 |
-
)
|
3052 |
-
lili_chatbot_select_btn.click(
|
3053 |
-
chatbot_select,
|
3054 |
-
inputs=[lili_chatbot_name],
|
3055 |
-
outputs=chatbot_select_outputs
|
3056 |
-
).then(
|
3057 |
-
update_avatar_images,
|
3058 |
-
inputs=[lili_avatar_images, lili_chatbot_description_value],
|
3059 |
-
outputs=[ai_chatbot],
|
3060 |
-
scroll_to_output=True
|
3061 |
-
)
|
3062 |
-
maimai_chatbot_select_btn.click(
|
3063 |
-
chatbot_select,
|
3064 |
-
inputs=[maimai_chatbot_name],
|
3065 |
-
outputs=chatbot_select_outputs
|
3066 |
-
).then(
|
3067 |
-
update_avatar_images,
|
3068 |
-
inputs=[maimai_avatar_images, maimai_chatbot_description_value],
|
3069 |
-
outputs=[ai_chatbot],
|
3070 |
-
scroll_to_output=True
|
3071 |
-
)
|
3072 |
# ALL CHATBOT SELECT LIST
|
3073 |
all_chatbot_select_btn.click(
|
3074 |
show_all_chatbot_accordion,
|
3075 |
inputs=[],
|
3076 |
outputs=[chatbot_select_accordion, all_chatbot_select_btn]
|
3077 |
)
|
3078 |
-
|
3079 |
-
# OPENAI ASSISTANT CHATBOT 模式
|
3080 |
-
send_button.click(
|
3081 |
-
chat_with_opan_ai_assistant,
|
3082 |
-
inputs=[password, video_id, user_data, thread_id, trascript_state, key_moments, msg, chatbot, content_subject, content_grade, questions_answers_json, socratic_mode_btn],
|
3083 |
-
outputs=[msg, chatbot, thread_id],
|
3084 |
-
scroll_to_output=True
|
3085 |
-
)
|
3086 |
-
openai_chatbot_audio_input.change(
|
3087 |
-
process_open_ai_audio_to_chatbot,
|
3088 |
-
inputs=[password, openai_chatbot_audio_input],
|
3089 |
-
outputs=[msg]
|
3090 |
-
)
|
3091 |
# OPENAI ASSISTANT CHATBOT 連接按鈕點擊事件
|
3092 |
def setup_question_button_click(button, inputs_list, outputs_list, chat_func, scroll_to_output=True):
|
3093 |
button.click(
|
@@ -3096,35 +3184,43 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, seconda
|
|
3096 |
outputs=outputs_list,
|
3097 |
scroll_to_output=scroll_to_output
|
3098 |
)
|
3099 |
-
|
3100 |
-
|
3101 |
-
|
3102 |
-
|
3103 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3104 |
|
3105 |
# 為生成問題按鈕設定特殊的點擊事件
|
3106 |
-
|
|
|
|
|
|
|
|
|
|
|
3107 |
change_questions,
|
3108 |
inputs=[password, df_string_output],
|
3109 |
outputs=question_buttons
|
3110 |
)
|
3111 |
-
|
3112 |
-
|
3113 |
-
|
3114 |
-
|
3115 |
-
inputs=[ai_name, password, video_id, user_data, trascript_state, key_moments, ai_msg, ai_chatbot, content_subject, content_grade, questions_answers_json, ai_chatbot_socratic_mode_btn],
|
3116 |
-
outputs=[ai_msg, ai_chatbot],
|
3117 |
-
scroll_to_output=True
|
3118 |
)
|
3119 |
-
# 其他精靈 ai_chatbot 连接按钮点击事件
|
3120 |
-
ai_chatbot_buttons = [ai_chatbot_question_1, ai_chatbot_question_2, ai_chatbot_question_3]
|
3121 |
-
for ai_question_btn in ai_chatbot_buttons:
|
3122 |
-
inputs_list = [ai_name, password, video_id, user_data, trascript_state, key_moments, ai_question_btn, ai_chatbot, content_subject, content_grade, questions_answers_json, ai_chatbot_socratic_mode_btn]
|
3123 |
-
outputs_list = [ai_msg, ai_chatbot]
|
3124 |
-
setup_question_button_click(ai_question_btn, inputs_list, outputs_list, chat_with_ai)
|
3125 |
-
|
3126 |
-
# file_upload.change(process_file, inputs=file_upload, outputs=df_string_output)
|
3127 |
-
# file_upload.change(process_file, inputs=file_upload, outputs=[btn_1, btn_2, btn_3, df_summarise, df_string_output])
|
3128 |
|
3129 |
# 当输入 YouTube 链接时触发
|
3130 |
process_youtube_link_inputs = [password, youtube_link]
|
@@ -3160,9 +3256,6 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, seconda
|
|
3160 |
trascript_state,
|
3161 |
key_moments_state,
|
3162 |
streaming_chat_thread_id_state,
|
3163 |
-
btn_1,
|
3164 |
-
btn_2,
|
3165 |
-
btn_3,
|
3166 |
ai_chatbot_question_1,
|
3167 |
ai_chatbot_question_2,
|
3168 |
ai_chatbot_question_3
|
@@ -3494,9 +3587,9 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, seconda
|
|
3494 |
exit_ticket_accordion,
|
3495 |
password,
|
3496 |
youtube_link,
|
3497 |
-
chatbot_open_ai,
|
3498 |
chatbot_open_ai_streaming,
|
3499 |
-
|
|
|
3500 |
]
|
3501 |
demo.load(
|
3502 |
init_params,
|
|
|
45 |
print("===gr__version__===")
|
46 |
print(gr.__version__)
|
47 |
|
48 |
+
# KEY CONFIG
|
49 |
if is_env_local:
|
50 |
with open("local_config.json") as f:
|
51 |
config = json.load(f)
|
|
|
53 |
GCS_KEY = json.dumps(config["GOOGLE_APPLICATION_CREDENTIALS_JSON"])
|
54 |
DRIVE_KEY = json.dumps(config["GOOGLE_APPLICATION_CREDENTIALS_JSON"])
|
55 |
OPEN_AI_KEY = config["OPEN_AI_KEY"]
|
56 |
+
OPEN_AI_ASSISTANT_ID_GPT4_BOT1 = config["OPEN_AI_ASSISTANT_ID_GPT4_BOT1"]
|
57 |
+
OPEN_AI_ASSISTANT_ID_GPT3_BOT1 = config["OPEN_AI_ASSISTANT_ID_GPT3_BOT1"]
|
58 |
+
OPEN_AI_KEY_BOT2 = config["OPEN_AI_KEY_BOT2"]
|
59 |
+
OPEN_AI_ASSISTANT_ID_GPT4_BOT2 = config["OPEN_AI_ASSISTANT_ID_GPT4_BOT2"]
|
60 |
+
OPEN_AI_ASSISTANT_ID_GPT3_BOT2 = config["OPEN_AI_ASSISTANT_ID_GPT3_BOT2"]
|
61 |
GROQ_API_KEY = config["GROQ_API_KEY"]
|
62 |
JUTOR_CHAT_KEY = config["JUTOR_CHAT_KEY"]
|
63 |
AWS_ACCESS_KEY = config["AWS_ACCESS_KEY"]
|
|
|
69 |
GCS_KEY = os.getenv("GOOGLE_APPLICATION_CREDENTIALS_JSON")
|
70 |
DRIVE_KEY = os.getenv("GOOGLE_APPLICATION_CREDENTIALS_JSON")
|
71 |
OPEN_AI_KEY = os.getenv("OPEN_AI_KEY")
|
72 |
+
OPEN_AI_ASSISTANT_ID_GPT4_BOT1 = os.getenv("OPEN_AI_ASSISTANT_ID_GPT4_BOT1")
|
73 |
+
OPEN_AI_ASSISTANT_ID_GPT3_BOT1 = os.getenv("OPEN_AI_ASSISTANT_ID_GPT3_BOT1")
|
74 |
+
OPEN_AI_KEY_BOT2 = os.getenv("OPEN_AI_KEY_BOT2")
|
75 |
+
OPEN_AI_ASSISTANT_ID_GPT4_BOT2 = os.getenv("OPEN_AI_ASSISTANT_ID_GPT4_BOT2")
|
76 |
+
OPEN_AI_ASSISTANT_ID_GPT3_BOT2 = os.getenv("OPEN_AI_ASSISTANT_ID_GPT3_BOT2")
|
77 |
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
|
78 |
JUTOR_CHAT_KEY = os.getenv("JUTOR_CHAT_KEY")
|
79 |
AWS_ACCESS_KEY = os.getenv("AWS_ACCESS_KEY")
|
|
|
83 |
|
84 |
TRANSCRIPTS = []
|
85 |
CURRENT_INDEX = 0
|
86 |
+
CHAT_LIMIT = 10
|
87 |
|
88 |
+
# CLIENTS CONFIG
|
89 |
GROQ_CLIENT = Groq(api_key=GROQ_API_KEY)
|
90 |
GCS_SERVICE = GoogleCloudStorage(GCS_KEY)
|
91 |
GCS_CLIENT = GCS_SERVICE.client
|
|
|
96 |
region_name=AWS_REGION_NAME,
|
97 |
)
|
98 |
|
99 |
+
# check open ai access
def check_open_ai_access(open_ai_api_key):
    """Return True when the given OpenAI API key can complete a minimal chat request.

    Fires one tiny gpt-3.5-turbo completion as a probe; any API or network
    failure is logged and reported as "no access" (False).
    """
    probe = OpenAI(api_key=open_ai_api_key)
    try:
        reply = probe.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "This is a test."}],
        )
        return bool(reply.choices[0].message.content)
    except Exception as e:
        print(f"Error: {str(e)}")
        return False
|
117 |
+
|
118 |
+
# Pick the first OpenAI account whose API key currently works and expose its
# client plus assistant IDs as module-level globals used by the chat features.
open_ai_api_key_assistant_id_list = [
    {"account":"bot1", "open_ai_api_key": OPEN_AI_KEY, "assistant_gpt4_id": OPEN_AI_ASSISTANT_ID_GPT4_BOT1, "assistant_gpt3_id": OPEN_AI_ASSISTANT_ID_GPT3_BOT1},
    {"account":"bot2", "open_ai_api_key": OPEN_AI_KEY_BOT2, "assistant_gpt4_id": OPEN_AI_ASSISTANT_ID_GPT4_BOT2, "assistant_gpt3_id": OPEN_AI_ASSISTANT_ID_GPT3_BOT2},
]
for open_ai_api_key_assistant_id in open_ai_api_key_assistant_id_list:
    account = open_ai_api_key_assistant_id["account"]
    open_ai_api_key = open_ai_api_key_assistant_id["open_ai_api_key"]
    if check_open_ai_access(open_ai_api_key):
        OPEN_AI_CLIENT = OpenAI(api_key=open_ai_api_key)
        OPEN_AI_ASSISTANT_ID_GPT4 = open_ai_api_key_assistant_id["assistant_gpt4_id"]
        OPEN_AI_ASSISTANT_ID_GPT3 = open_ai_api_key_assistant_id["assistant_gpt3_id"]
        print(f"OpenAI access is OK, account: {account}")
        break
else:
    # Previously, when every key failed, OPEN_AI_CLIENT was simply never bound
    # and the app crashed much later with a confusing NameError. Fail fast
    # with an explicit message instead.
    raise RuntimeError("No usable OpenAI API key found (tried accounts: bot1, bot2)")
|
131 |
+
|
132 |
# 驗證 password
|
133 |
def verify_password(password):
|
134 |
if password == PASSWORD:
|
|
|
415 |
|
416 |
def get_video_duration(video_id):
    """Return the length of a YouTube video in seconds, or None when unavailable.

    pytube occasionally fails to resolve metadata (age-restricted or
    region-locked videos, upstream API changes); the duration is then treated
    as unknown rather than aborting the caller.
    """
    yt = YouTube(f'https://www.youtube.com/watch?v={video_id}')
    try:
        video_duration = yt.length
    except Exception as e:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit and hid the failure cause. Keep the best-effort behavior
        # for ordinary errors only, and log why it failed.
        print(f"get_video_duration error: {e}")
        video_duration = None

    print(f"video_duration: {video_duration}")

    return video_duration
|
426 |
|
427 |
def process_transcript_and_screenshots_on_gcs(video_id):
|
428 |
print("====process_transcript_and_screenshots_on_gcs====")
|
|
|
435 |
# 检查逐字稿是否存在
|
436 |
is_new_transcript = False
|
437 |
is_transcript_exists = GCS_SERVICE.check_file_exists(bucket_name, transcript_blob_name)
|
438 |
+
video_duration = get_video_duration(video_id)
|
439 |
if not is_transcript_exists:
|
440 |
print("逐字稿文件不存在于GCS中,重新建立")
|
441 |
# 从YouTube获取逐字稿并上传
|
|
|
451 |
else:
|
452 |
print("沒有找到字幕")
|
453 |
transcript = generate_transcription_by_whisper(video_id)
|
454 |
+
if video_duration:
|
455 |
+
transcript = [entry for entry in transcript if entry['start'] <= video_duration]
|
456 |
+
|
457 |
transcript_text = json.dumps(transcript, ensure_ascii=False, indent=2)
|
458 |
GCS_SERVICE.upload_json_string(bucket_name, transcript_blob_name, transcript_text)
|
459 |
|
|
|
463 |
print("逐字稿已存在于GCS中")
|
464 |
transcript_text = GCS_SERVICE.download_as_string(bucket_name, transcript_blob_name)
|
465 |
transcript = json.loads(transcript_text)
|
466 |
+
if video_duration:
|
467 |
+
transcript = [entry for entry in transcript if entry['start'] <= video_duration]
|
468 |
|
469 |
# print("===確認其他衍生文件===")
|
470 |
# source = "gcs"
|
|
|
658 |
# Create the output directory if it doesn't exist
|
659 |
if not os.path.exists(output_path):
|
660 |
os.makedirs(output_path)
|
661 |
+
|
662 |
# Download the video
|
663 |
+
try:
|
664 |
+
yt = YouTube(youtube_url)
|
665 |
+
video_stream = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
|
666 |
+
video_stream.download(output_path=output_path, filename=youtube_id+".mp4")
|
667 |
+
print(f"[Pytube] Video downloaded successfully: {output_path}/{youtube_id}.mp4")
|
668 |
+
except Exception as e:
|
669 |
+
ydl_opts = {
|
670 |
+
'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best', # This ensures the best quality combining video and audio
|
671 |
+
'outtmpl': os.path.join(output_path, f'{youtube_id}.mp4'), # Output filename template
|
672 |
+
}
|
673 |
+
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
|
674 |
+
ydl.download([youtube_url])
|
675 |
|
676 |
+
print(f"[yt_dlp] Video downloaded successfully: {output_path}/{youtube_id}.mp4")
|
677 |
|
678 |
def screenshot_youtube_video(youtube_id, snapshot_sec):
|
679 |
video_path = f'{OUTPUT_PATH}/{youtube_id}.mp4'
|
|
|
1270 |
print("=====get_questions=====")
|
1271 |
return q1, q2, q3
|
1272 |
|
|
|
1273 |
def get_key_moments(video_id, formatted_simple_transcript, formatted_transcript, source):
|
1274 |
if source == "gcs":
|
1275 |
print("===get_key_moments on gcs===")
|
|
|
1338 |
1. 小範圍切出不同段落的相對應時間軸的重點摘要,
|
1339 |
2. 每一小段最多不超過 1/5 的總內容,也就是大約 3~5段的重點(例如五~十分鐘的影片就一段大約1~2分鐘,最多三分鐘,但如果是超過十分鐘的影片,那一小段大約 2~3分鐘,以此類推)
|
1340 |
3. 注意不要遺漏任何一段時間軸的內容 從零秒開始
|
1341 |
+
4. 如果頭尾的情節不是重點,特別是打招呼或是介紹人物、或是say goodbye 就是不重要的情節,就不用擷取
|
1342 |
+
5. 以這種方式分析整個文本,從零秒開始分析,直到結束。這很重要
|
|
|
1343 |
6. 關鍵字從transcript extract to keyword,保留專家名字、專業術語、年份、數字、期刊名稱、地名、數學公式
|
1344 |
+
7. text, keywords please use or transfer zh-TW, it's very important
|
1345 |
|
1346 |
+
Example: retrun JSON
|
1347 |
+
{{key_moments:[{{
|
1348 |
"start": "00:00",
|
1349 |
"end": "01:00",
|
1350 |
"text": "逐字稿的重點摘要",
|
|
|
1351 |
"keywords": ["關鍵字", "關鍵字"]
|
1352 |
+
}}]
|
1353 |
+
}}
|
1354 |
"""
|
1355 |
|
1356 |
try:
|
|
|
1398 |
response = BEDROCK_CLIENT.invoke_model(**kwargs)
|
1399 |
response_body = json.loads(response.get('body').read())
|
1400 |
response_completion = response_body.get('content')[0].get('text')
|
1401 |
+
print(f"response_completion: {response_completion}")
|
1402 |
+
|
1403 |
key_moments = json.loads(response_completion)["key_moments"]
|
1404 |
|
1405 |
+
# "transcript": get text from formatted_simple_transcript
|
1406 |
+
for moment in key_moments:
|
1407 |
+
start_time = parse_time(moment['start'])
|
1408 |
+
end_time = parse_time(moment['end'])
|
1409 |
+
# 使用轉換後的 timedelta 物件進行時間
|
1410 |
+
moment['transcript'] = ",".join([entry['text'] for entry in formatted_simple_transcript
|
1411 |
+
if start_time <= parse_time(entry['start_time']) <= end_time])
|
1412 |
+
|
1413 |
print("=====key_moments=====")
|
1414 |
print(key_moments)
|
1415 |
print("=====key_moments=====")
|
|
|
2082 |
return word_path
|
2083 |
|
2084 |
# ---- Chatbot ----
|
2085 |
+
def get_instructions(content_subject, content_grade, key_moments, socratic_mode=True):
|
2086 |
+
if socratic_mode:
|
2087 |
+
method = "Socratic style, guide thinking, no direct answers. this is very important, please be seriously following."
|
2088 |
+
else:
|
2089 |
+
method = "direct answers, but encourage user to think more."
|
2090 |
+
|
2091 |
instructions = f"""
|
2092 |
subject: {content_subject}
|
2093 |
grade: {content_grade}
|
2094 |
context: {key_moments}
|
2095 |
Assistant Role: you are a {content_subject} assistant. you can call yourself as {content_subject} 學伴
|
2096 |
User Role: {content_grade} th-grade student.
|
2097 |
+
Method: {method}
|
2098 |
Language: Traditional Chinese ZH-TW (it's very important), suitable for {content_grade} th-grade level.
|
2099 |
Response:
|
2100 |
+
- if user say hi or hello or any greeting, just say hi back and introduce yourself. Then tell user to ask question in context.
|
2101 |
- Single question, under 100 characters
|
2102 |
- include math symbols (use LaTeX $ to cover before and after, ex: $x^2$)
|
2103 |
- hint with video timestamp which format 【參考:00:00:00】.
|
|
|
2108 |
"""
|
2109 |
return instructions
|
2110 |
|
2111 |
+
def chat_with_any_ai(ai_type, password, video_id, user_data, transcript_state, key_moments, user_message, chat_history, content_subject, content_grade, questions_answers_json, socratic_mode=False, thread_id=None, ai_name=None):
    """Route one user chat turn to the selected AI backend and update the conversation.

    ai_type: "chat_completions" (project Chatbot wrapper configured by
        get_chatbot_config) or "assistant" (OpenAI Assistants API with a
        persistent thread identified by thread_id).
    key_moments / questions_answers_json: JSON strings or already-parsed data.
    Returns ("", chat_history, send_btn_update, send_feedback_btn_update,
    thread_id) — the leading empty string clears the input textbox in the UI.
    """
    print(f"ai_type: {ai_type}")
    print(f"user_data: {user_data}")
    verify_password(password)
    verify_message_length(user_message, max_length=1500)

    # Preset questions are answered straight from the canned Q&A list without
    # calling any model; the short sleep mimics "thinking" in the UI.
    is_questions_answers_exists, question_message, answer_message = check_questions_answers(user_message, questions_answers_json)
    if is_questions_answers_exists:
        chat_history = update_chat_history(question_message, answer_message, chat_history)
        send_btn_update, send_feedback_btn_update = update_send_and_feedback_buttons(chat_history, CHAT_LIMIT)
        time.sleep(3)

        return "", chat_history, send_btn_update, send_feedback_btn_update, thread_id

    verify_chat_limit(chat_history, CHAT_LIMIT)

    if ai_type == "chat_completions":
        chatbot_config = get_chatbot_config(ai_name, transcript_state, key_moments, content_subject, content_grade, video_id, socratic_mode)
        chatbot = Chatbot(chatbot_config)
        response_text = chatbot.chat(user_message, chat_history)
        thread_id = ""  # chat-completions mode keeps no assistant thread
    elif ai_type == "assistant":
        client = OPEN_AI_CLIENT
        assistant_id = OPEN_AI_ASSISTANT_ID_GPT4  # GPT-4 turbo
        if isinstance(key_moments, str):
            key_moments_json = json.loads(key_moments)
        else:
            key_moments_json = key_moments
        # Drop bulky fields (images / end / transcript) from each key moment
        # so the instructions stay within a reasonable prompt size.
        for moment in key_moments_json:
            moment.pop('images', None)
            moment.pop('end', None)
            moment.pop('transcript', None)
        key_moments_text = json.dumps(key_moments_json, ensure_ascii=False)
        instructions = get_instructions(content_subject, content_grade, key_moments_text, socratic_mode)
        print(f"=== instructions:{instructions} ===")
        # Stored on the OpenAI thread for later auditing of the session.
        metadata={
            "video_id": video_id,
            "user_data": user_data,
            "content_subject": content_subject,
            "content_grade": content_grade,
            "socratic_mode": str(socratic_mode),
            "assistant_id": assistant_id,
            "is_streaming": "false",
        }
        # NOTE(review): the leading "/n" below looks like a typo for "\n" — confirm intent before changing.
        user_message_note = "/n 請嚴格遵循instructions,擔任一位蘇格拉底家教,絕對不要重複 user 的問句,請用引導的方式指引方向,請一定要用繁體中文回答 zh-TW,並用台灣人的禮貌口語表達,回答時不要特別說明這是台灣人的語氣,請在回答的最後標註【參考:(時):(分):(秒)】,(如果是反問學生,就只問一個問題,請幫助學生更好的理解資料,字數在100字以內,回答時如果講到數學專有名詞,請用數學符號代替文字(Latex 用 $ 字號 render, ex: $x^2$)"
        user_content = user_message + user_message_note
        response_text, thread_id = handle_conversation_by_open_ai_assistant(client, user_content, instructions, assistant_id, thread_id, metadata, fallback=True)

    # Append this exchange and refresh the send / feedback buttons.
    chat_history = update_chat_history(user_message, response_text, chat_history)
    send_btn_update, send_feedback_btn_update = update_send_and_feedback_buttons(chat_history, CHAT_LIMIT)

    # Return "" to clear the input box, plus the updated history and controls.
    return "", chat_history, send_btn_update, send_feedback_btn_update, thread_id
|
|
|
2166 |
|
2167 |
+
def get_chatbot_config(ai_name, transcript_state, key_moments, content_subject, content_grade, video_id, socratic_mode=True):
|
2168 |
if not ai_name in ["foxcat", "lili", "maimai"]:
|
2169 |
ai_name = "foxcat"
|
2170 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2171 |
ai_name_clients_model = {
|
2172 |
"foxcat": {
|
2173 |
"ai_name": "foxcat",
|
|
|
2179 |
"ai_client": BEDROCK_CLIENT,
|
2180 |
"ai_model_name": "claude3",
|
2181 |
},
|
|
|
|
|
|
|
|
|
|
|
2182 |
"maimai": {
|
2183 |
"ai_name": "maimai",
|
2184 |
"ai_client": GROQ_CLIENT,
|
|
|
2188 |
ai_client = ai_name_clients_model.get(ai_name, "foxcat")["ai_client"]
|
2189 |
ai_model_name = ai_name_clients_model.get(ai_name, "foxcat")["ai_model_name"]
|
2190 |
|
2191 |
+
if isinstance(transcript_state, str):
|
2192 |
+
simple_transcript = json.loads(transcript_state)
|
2193 |
else:
|
2194 |
+
simple_transcript = transcript_state
|
2195 |
|
2196 |
if isinstance(key_moments, str):
|
2197 |
key_moments_json = json.loads(key_moments)
|
|
|
2204 |
moment.pop('transcript', None)
|
2205 |
key_moments_text = json.dumps(key_moments_json, ensure_ascii=False)
|
2206 |
|
2207 |
+
instructions = get_instructions(content_subject, content_grade, key_moments_text, socratic_mode)
|
2208 |
|
2209 |
chatbot_config = {
|
2210 |
"video_id": video_id,
|
|
|
2218 |
"instructions": instructions
|
2219 |
}
|
2220 |
|
2221 |
+
return chatbot_config
|
2222 |
+
|
2223 |
+
def feedback_with_ai(ai_type, chat_history, thread_id=None):
    """Grade the user's question-asking skill over the whole conversation.

    Builds a meta-prompt from the chat history (skipping the first, canned
    exchange), asks GPT-4 for feedback on the user's "提問力", appends that
    feedback to the history as a new exchange, and disables the feedback button.

    ai_type: "chat_completions" or "assistant" (OpenAI Assistants API).
    thread_id: existing assistant thread; only used when ai_type == "assistant".
    Returns (chat_history, feedback_btn_update).
    """
    # Prompt idea: based on the conversation (chat_history), summarize the
    # user's questioning ability and whether they asked the right questions.
    system_content = """
    你是一個擅長引導問答素養的老師,user 為學生的提問跟回答,請精讀對話過程,針對 user 給予回饋就好,根據以下 Rule:
    - 請使用繁體中文 zh-TW 總結 user 的提問力,並給予是否有問對問題的回饋和建議
    - 不採計【預設提問】的問題,如果 user 的提問都來自【預設提問】,表達用戶善於使用系統,請給予回饋並鼓勵 user 親自提問更具體的問題
    - 如果用戶提問都相當簡短,甚至就是一個字或都是一個數字(像是 user: 1, user:2),請給予回饋並建議 user 提問更具體的問題
    - 如果用戶提問內容只有符號或是亂碼,像是?,!, ..., 3bhwbqhfw2vve2 等,請給予回饋並建議 user 提問更具體的問題
    - 如果用戶提問內容有色情、暴力、仇恨、不當言論等,請給予嚴厲的回饋並建議 user 提問更具體的問題
    - 並用第二人稱「你」來代表 user
    - 請禮貌,並給予鼓勵
    """
    chat_history_conversation = ""
    # Serialize each exchange as "User: ...\nAssistant: ..." lines.
    # The first exchange is the canned self-introduction, so it is not graded.
    for chat in chat_history[1:]:
        user_message = chat[0]
        assistant_message = chat[1]
        chat_history_conversation += f"User: {user_message}\nAssistant: {assistant_message}\n"

    feedback_request_message = "請依據以上的對話,總結我的「提問力」,並給予我是否有「問對問題」的回饋和建議"
    user_content = f"""conversation: {chat_history_conversation}
    {feedback_request_message}
    最後根據提問力表現,給予提問建議、提問表現,並用 emoji 來表示評分:
    🟢:(表現很好的回饋,給予正向肯定)
    🟡:(還可以加油的的回饋,給予明確的建議)
    🔴:(非常不懂提問的回饋,給予鼓勵並給出明確示範)

    example:
    另一方面,你表達「我不想學了」這個情感,其實也是一種重要的反饋。這顯示你可能感到挫折或疲倦。在這種情況下,表達出你的感受是好的,但如果能具體說明是什麼讓你感到這樣,或是有什麼具體的學習障礙,會更有助於找到解決方案。
    給予你的建議是,嘗試在提問時更明確一些,這樣不僅能幫助你獲得更好的學習支持,也能提高你的問題解決技巧。
    ......
    提問建議:在提問時,試著具體並清晰地表達你的需求和疑惑,這樣能更有效地得到幫助。
    提問表現:【🟡】加油,持續練習,你的提問力會越來越好!
    """

    client = OPEN_AI_CLIENT

    if ai_type == "chat_completions":
        model_name = "gpt-4-turbo"
        response_text = handle_conversation_by_open_ai_chat_completions(client, model_name, user_content, system_content)
    elif ai_type == "assistant":
        assistant_id = OPEN_AI_ASSISTANT_ID_GPT4  # GPT-4 turbo
        # assistant_id = OPEN_AI_ASSISTANT_ID_GPT3  # GPT-3.5 turbo
        response_text, thread_id = handle_conversation_by_open_ai_assistant(client, user_content, system_content, assistant_id, thread_id, metadata=None, fallback=True)

    chat_history = update_chat_history(feedback_request_message, response_text, chat_history)
    feedback_btn_update = gr.update(value="已回饋", interactive=False, variant="secondary")

    return chat_history, feedback_btn_update
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2273 |
|
2274 |
+
def handle_conversation_by_open_ai_chat_completions(client, model_name, user_content, system_content):
    """Run one system+user exchange through the chat-completions API.

    Returns the assistant reply with surrounding whitespace stripped.
    """
    conversation = [
        {"role": "system", "content": system_content},
        {"role": "user", "content": user_content}
    ]
    completion = client.chat.completions.create(
        model=model_name,
        messages=conversation,
        max_tokens=4000,
    )
    return completion.choices[0].message.content.strip()
|
|
|
|
|
|
|
2285 |
|
2286 |
+
def handle_conversation_by_open_ai_assistant(client, user_message, instructions, assistant_id, thread_id=None, metadata=None, fallback=False):
    """
    Handles the creation and management of a conversation thread.
    :param client: The OpenAI client object.
    :param thread_id: The existing thread ID, if any; a new thread is created when falsy.
    :param user_message: The message from the user.
    :param instructions: System instructions for the assistant.
    :param assistant_id: ID of the assistant to use.
    :param metadata: Additional metadata to add to the thread.
    :param fallback: When True, any Assistants-API failure degrades to a plain
        gpt-4-turbo chat completion; when False the error is re-raised as gr.Error.
    :return: Tuple (response_text, thread_id). response_text is the assistant's
        reply, a canned "busy" message if the run does not complete in time,
        or the fallback completion's text.
    """
    try:
        if not thread_id:
            thread = client.beta.threads.create()
            thread_id = thread.id
        else:
            thread = client.beta.threads.retrieve(thread_id)

        if metadata:
            client.beta.threads.update(thread_id=thread.id, metadata=metadata)

        # Send the user message to the thread
        client.beta.threads.messages.create(thread_id=thread.id, role="user", content=user_message)

        # Run the assistant
        run = client.beta.threads.runs.create(thread_id=thread.id, assistant_id=assistant_id, instructions=instructions)

        # Poll until the run finishes; gives up after 30 seconds.
        run_status = poll_run_status(run.id, thread.id, timeout=30)

        if run_status == "completed":
            messages = client.beta.threads.messages.list(thread_id=thread.id)
            # data[0] is taken as the latest (assistant) reply — relies on the
            # API's default newest-first message ordering.
            response_text = messages.data[0].content[0].text.value
        else:
            response_text = "學習精靈有點累,請稍後再試!"
    except Exception as e:
        if fallback:
            # Assistants API failed — degrade to a one-shot chat completion
            # with the same instructions as the system prompt.
            response = client.chat.completions.create(
                model="gpt-4-turbo",
                messages=[
                    {"role": "system", "content": instructions},
                    {"role": "user", "content": user_message}
                ],
                max_tokens=4000,
            )
            response_text = response.choices[0].message.content.strip()
        else:
            print(f"Error: {e}")
            raise gr.Error(f"Error: {e}")

    return response_text, thread_id
|
2339 |
+
|
2340 |
+
def verify_message_length(user_message, max_length=500):
    """Raise gr.Error when the user's message exceeds max_length characters.

    The error text now reports the actual limit: callers pass limits other
    than the default (e.g. chat_with_any_ai uses 1500), while the old message
    always claimed "五百字".
    """
    if len(user_message) > max_length:
        error_msg = f"你的訊息太長了,請縮短訊息長度至 {max_length} 字以內"
        raise gr.Error(error_msg)
|
2345 |
+
|
2346 |
+
def check_questions_answers(user_message, questions_answers_json):
    """Look up user_message among the preset Q&A pairs.

    :param user_message: Raw text the user submitted.
    :param questions_answers_json: JSON string or already-parsed list of
        {"question": ..., "answer": ...} dicts.
    :return: (found, question_message, answer_message) — question_message is
        the matched question tagged with 【預設問題】 and answer_message is the
        canned answer; both are "" when there is no match (a preset question
        with an empty answer does not count as a match).
    """
    is_questions_answers_exists = False
    # Accept either a JSON string or a pre-parsed list.
    # (Removed the dead local `answer = ""` that was never read.)
    if isinstance(questions_answers_json, str):
        qa_data = json.loads(questions_answers_json)
    else:
        qa_data = questions_answers_json

    question_message = ""
    answer_message = ""
    for qa in qa_data:
        if user_message == qa["question"] and qa["answer"]:
            is_questions_answers_exists = True
            question_message = f"【預設問題】{user_message}"
            answer_message = qa["answer"]
            print("=== in questions_answers_json==")
            print(f"question: {qa['question']}")
            print(f"answer: {answer_message}")
            break  # stop at the first matching preset question

    return is_questions_answers_exists, question_message, answer_message
|
2369 |
+
|
2370 |
+
def verify_chat_limit(chat_history, chat_limit):
    """Raise gr.Error once the conversation has exceeded chat_limit rounds."""
    if chat_history is not None and len(chat_history) > chat_limit:
        # Report the actual limit instead of a hard-coded "10", mirroring the
        # streaming handler which formats CHAT_LIMIT into the same message.
        error_msg = f"此次對話超過上限(對話一輪{chat_limit}次)"
        raise gr.Error(error_msg)
|
2374 |
+
|
2375 |
+
def update_chat_history(user_message, response, chat_history):
    """Append the (user, assistant) pair to the history.

    Creates a fresh one-entry list when chat_history is None; otherwise the
    given list is mutated in place and returned.
    """
    entry = (user_message, response)
    if chat_history is None:
        return [entry]
    chat_history.append(entry)
    return chat_history
|
2384 |
+
|
2385 |
+
def update_send_and_feedback_buttons(chat_history, chat_limit):
    """Refresh the send / feedback buttons from the conversation length.

    The first history entry is the canned introduction, so the displayed send
    count is len(chat_history) - 1. Once the history exceeds the limit the
    send button is disabled and the feedback button becomes visible.
    """
    send_count = len(chat_history) - 1
    over_limit = len(chat_history) > chat_limit
    if over_limit:
        send_btn_update = gr.update(value=f"對話上限 ({send_count}/{chat_limit})", interactive=False)
    else:
        send_btn_update = gr.update(value=f"發送 ({send_count}/{chat_limit})", interactive=True)
    send_feedback_btn_update = gr.update(visible=over_limit)

    return send_btn_update, send_feedback_btn_update
|
2400 |
|
2401 |
def process_open_ai_audio_to_chatbot(password, audio_url):
|
2402 |
verify_password(password)
|
|
|
2406 |
if file_size > 2000000:
|
2407 |
raise gr.Error("檔案大小超過,請不要超過 60秒")
|
2408 |
else:
|
2409 |
+
transcription = OPEN_AI_CLIENT.audio.transcriptions.create(
|
2410 |
model="whisper-1",
|
2411 |
file=audio_file,
|
2412 |
response_format="text"
|
2413 |
)
|
2414 |
# response 拆解 dict
|
2415 |
+
print("=== transcription ===")
|
2416 |
+
print(transcription)
|
2417 |
+
print("=== transcription ===")
|
2418 |
+
# 確認 response 是否有數學符號,prompt to LATEX $... $, ex: $x^2$
|
2419 |
+
|
2420 |
+
if transcription:
|
2421 |
+
system_message = """你是專業的 LATEX 轉換師,擅長將數學符號、公式轉換成 LATEX 格式,並用 LATEX 符號 $...$ 包裹,ex: $x^2$
|
2422 |
+
範例:
|
2423 |
+
transcription: x的平方加 2x 加 1 等於 0
|
2424 |
+
轉成 LATEX 格式:$x^2 + 2x + 1 = 0$
|
2425 |
+
"""
|
2426 |
+
user_message = f"""transcription: {transcription}
|
2427 |
+
請將 transcription 內的數學、公式、運算式、化學式、物理 formula 內容轉換成 LATEX 格式
|
2428 |
+
其他文字都保留原樣
|
2429 |
+
也不要給出多餘的敘述
|
2430 |
+
"""
|
2431 |
+
request = OPEN_AI_CLIENT.chat.completions.create(
|
2432 |
+
model="gpt-4-turbo",
|
2433 |
+
messages=[
|
2434 |
+
{"role": "system", "content": system_message},
|
2435 |
+
{"role": "user", "content": user_message}
|
2436 |
+
],
|
2437 |
+
max_tokens=4000,
|
2438 |
+
)
|
2439 |
+
response = request.choices[0].message.content.strip()
|
2440 |
else:
|
2441 |
response = ""
|
2442 |
|
|
|
2491 |
|
2492 |
return run.status
|
2493 |
|
2494 |
+
def chat_with_opan_ai_assistant_streaming(user_message, chat_history, password, video_id, user_data, thread_id, trascript, key_moments, content_subject, content_grade, socratic_mode=True):
|
2495 |
verify_password(password)
|
2496 |
|
2497 |
print("=====user_data=====")
|
|
|
2506 |
raise gr.Error(error_msg)
|
2507 |
|
2508 |
# 如果 chat_history 超過 10 則訊息,直接 return "對話超過上限"
|
2509 |
+
if chat_history is not None and len(chat_history) > CHAT_LIMIT:
|
2510 |
+
error_msg = f"此次對話超過上限(對話一輪{CHAT_LIMIT}次)"
|
2511 |
raise gr.Error(error_msg)
|
2512 |
|
2513 |
try:
|
2514 |
+
assistant_id = OPEN_AI_ASSISTANT_ID_GPT4 #GPT 4 turbo
|
2515 |
+
# assistant_id = OPEN_AI_ASSISTANT_ID_GPT3 #GPT 3.5 turbo
|
2516 |
+
|
2517 |
client = OPEN_AI_CLIENT
|
2518 |
# 直接安排逐字稿資料 in instructions
|
2519 |
# if isinstance(trascript, str):
|
|
|
2535 |
moment.pop('transcript', None)
|
2536 |
key_moments_text = json.dumps(key_moments_json, ensure_ascii=False)
|
2537 |
|
2538 |
+
instructions = get_instructions(content_subject, content_grade, key_moments_text, socratic_mode)
|
2539 |
# 创建线程
|
2540 |
if not thread_id:
|
2541 |
thread = client.beta.threads.create()
|
|
|
2589 |
def chatbot_select(chatbot_name):
|
2590 |
chatbot_select_accordion_visible = gr.update(visible=False)
|
2591 |
all_chatbot_select_btn_visible = gr.update(visible=True)
|
|
|
2592 |
chatbot_open_ai_streaming_visible = gr.update(visible=False)
|
2593 |
+
chatbot_ai_visible = gr.update(visible=False)
|
2594 |
ai_name_update = gr.update(value="foxcat")
|
2595 |
+
ai_chatbot_thread_id_update = gr.update(value="")
|
2596 |
+
|
2597 |
if chatbot_name == "chatbot_open_ai":
|
2598 |
+
chatbot_ai_visible = gr.update(visible=True)
|
2599 |
+
ai_chatbot_ai_type_update = gr.update(value="assistant")
|
2600 |
elif chatbot_name == "chatbot_open_ai_streaming":
|
2601 |
chatbot_open_ai_streaming_visible = gr.update(visible=True)
|
2602 |
+
ai_chatbot_ai_type_update = gr.update(value="assistant_streaming")
|
2603 |
else:
|
2604 |
+
chatbot_ai_visible = gr.update(visible=True)
|
2605 |
+
ai_chatbot_ai_type_update = gr.update(value="chat_completions")
|
2606 |
+
|
2607 |
+
ai_name_update = gr.update(value=chatbot_name)
|
2608 |
|
2609 |
+
return chatbot_select_accordion_visible, all_chatbot_select_btn_visible, \
|
2610 |
+
chatbot_open_ai_streaming_visible, chatbot_ai_visible, \
|
2611 |
+
ai_name_update, ai_chatbot_ai_type_update, ai_chatbot_thread_id_update
|
2612 |
|
2613 |
+
def update_avatar_images(avatar_images, chatbot_description_value):
    """Reset the chatbot widget with new avatar images and a canned self-introduction exchange."""
    intro_history = [[
        "請問你是誰?",
        chatbot_description_value
    ]]
    return gr.update(avatar_images=avatar_images, value=intro_history)
|
|
|
2671 |
lesson_plan_accordion = gr.update(visible=True)
|
2672 |
exit_ticket_accordion = gr.update(visible=True)
|
2673 |
|
|
|
2674 |
chatbot_open_ai_streaming = gr.update(visible=False)
|
2675 |
+
chatbot_ai = gr.update(visible=False)
|
2676 |
+
ai_chatbot_params = gr.update(visible=True)
|
2677 |
|
2678 |
# if youtube_link in query_params
|
2679 |
if "youtube_id" in request.query_params:
|
|
|
2692 |
worksheet_accordion = gr.update(visible=False)
|
2693 |
lesson_plan_accordion = gr.update(visible=False)
|
2694 |
exit_ticket_accordion = gr.update(visible=False)
|
2695 |
+
ai_chatbot_params = gr.update(visible=False)
|
2696 |
|
2697 |
return admin, reading_passage_admin, summary_admin, see_detail, \
|
2698 |
worksheet_accordion, lesson_plan_accordion, exit_ticket_accordion, \
|
2699 |
password_text, youtube_link, \
|
2700 |
+
chatbot_open_ai_streaming, chatbot_ai, ai_chatbot_params
|
2701 |
|
2702 |
def update_state(content_subject, content_grade, trascript, key_moments, questions_answers):
|
2703 |
# inputs=[content_subject, content_grade, df_string_output],
|
|
|
2714 |
question_1 = questions_answers_json[0]["question"]
|
2715 |
question_2 = questions_answers_json[1]["question"]
|
2716 |
question_3 = questions_answers_json[2]["question"]
|
|
|
|
|
|
|
2717 |
ai_chatbot_question_1 = question_1
|
2718 |
ai_chatbot_question_2 = question_2
|
2719 |
ai_chatbot_question_3 = question_3
|
2720 |
|
2721 |
return content_subject_state, content_grade_state, trascript_state, key_moments_state, \
|
2722 |
streaming_chat_thread_id_state, \
|
|
|
2723 |
ai_chatbot_question_1, ai_chatbot_question_2, ai_chatbot_question_3
|
2724 |
|
2725 |
|
|
|
2748 |
});
|
2749 |
}
|
2750 |
</script>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2751 |
"""
|
2752 |
|
2753 |
with gr.Blocks(theme=gr.themes.Base(primary_hue=gr.themes.colors.orange, secondary_hue=gr.themes.colors.amber, text_size = gr.themes.sizes.text_lg), head=HEAD) as demo:
|
|
|
2768 |
with gr.Tab("AI小精靈"):
|
2769 |
with gr.Row():
|
2770 |
all_chatbot_select_btn = gr.Button("選擇 AI 小精靈 👈", elem_id="all_chatbot_select_btn", visible=False, variant="secondary", size="sm")
|
2771 |
+
with gr.Row() as ai_chatbot_params:
|
2772 |
+
ai_name = gr.Dropdown(
|
2773 |
+
label="選擇 AI 助理",
|
2774 |
+
choices=[
|
2775 |
+
("飛特精靈","chatbot_open_ai"),
|
2776 |
+
("飛特音速","chatbot_open_ai_streaming"),
|
2777 |
+
("梨梨","lili"),
|
2778 |
+
("麥麥","maimai"),
|
2779 |
+
("狐狸貓","foxcat")
|
2780 |
+
],
|
2781 |
+
value="foxcat",
|
2782 |
+
visible=True
|
2783 |
+
)
|
2784 |
+
ai_chatbot_ai_type = gr.Textbox(value="chat_completions", visible=True)
|
2785 |
+
ai_chatbot_thread_id = gr.Textbox(label="thread_id", visible=True)
|
2786 |
+
ai_chatbot_socratic_mode_btn = gr.Checkbox(label="蘇格拉底家教助理模式", value=True, visible=True)
|
2787 |
+
latex_delimiters = [{"left": "$", "right": "$", "display": False}]
|
2788 |
with gr.Accordion("選擇 AI 小精靈", elem_id="chatbot_select_accordion") as chatbot_select_accordion:
|
2789 |
with gr.Row():
|
2790 |
user_avatar = "https://em-content.zobj.net/source/google/263/flushed-face_1f633.png"
|
2791 |
+
# 飛特精靈
|
2792 |
+
with gr.Column(scale=1, variant="panel", visible=True):
|
2793 |
+
vaitor_chatbot_avatar_url = "https://junyitopicimg.s3.amazonaws.com/s4byy--icon.jpe?v=20200513013523726"
|
2794 |
+
vaitor_chatbot_avatar_images = gr.State([user_avatar, vaitor_chatbot_avatar_url])
|
2795 |
+
vaitor_chatbot_description = """Hi,我是你的AI學伴【飛特精靈】,\n
|
2796 |
我可以陪你一起學習本次的內容,有什麼問題都可以問我喔!\n
|
2797 |
🤔 如果你不知道怎麼發問,可以點擊左下方的問題一、問題二、問題三,我會幫你生成問題!\n
|
2798 |
🗣️ 也可以點擊右下方用語音輸入,我會幫你轉換成文字,厲害吧!\n
|
|
|
2801 |
🦄 如果達到上限,或是遇到精靈很累,請問問其他朋友,像是飛特音速說話的速度比較快,你是否跟得上呢?你也可以和其他精靈互動看看喔!\n
|
2802 |
"""
|
2803 |
chatbot_open_ai_name = gr.State("chatbot_open_ai")
|
2804 |
+
gr.Image(value=vaitor_chatbot_avatar_url, height=100, width=100, show_label=False, show_download_button=False)
|
2805 |
+
vaitor_chatbot_select_btn = gr.Button("👆選擇【飛特精靈】", elem_id="chatbot_btn", visible=True, variant="primary")
|
2806 |
+
vaitor_chatbot_description_value = gr.Markdown(value=vaitor_chatbot_description, visible=True)
|
2807 |
+
# 狐狸貓
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2808 |
with gr.Column(scale=1, variant="panel"):
|
2809 |
foxcat_chatbot_avatar_url = "https://storage.googleapis.com/wpassets.junyiacademy.org/1/2020/06/%E7%A7%91%E5%AD%B8%E5%BE%BD%E7%AB%A0-2-150x150.png"
|
2810 |
foxcat_avatar_images = gr.State([user_avatar, foxcat_chatbot_avatar_url])
|
|
|
2849 |
gr.Image(value=maimai_chatbot_avatar_url, height=100, width=100, show_label=False, show_download_button=False)
|
2850 |
maimai_chatbot_select_btn = gr.Button("👆選擇【麥麥】", visible=True, variant="primary", elem_classes="chatbot_select_btn")
|
2851 |
maimai_chatbot_description_value = gr.Markdown(value=maimai_chatbot_description, visible=True)
|
2852 |
+
# 飛特音速
|
2853 |
+
with gr.Column(scale=1, variant="panel", visible=True):
|
2854 |
+
streaming_chatbot_avatar_url = "https://storage.googleapis.com/wpassets.junyiacademy.org/1/2020/11/1-%E6%98%9F%E7%A9%BA%E9%A0%AD%E8%B2%BC-%E5%A4%AA%E7%A9%BA%E7%8B%90%E7%8B%B8%E8%B2%93-150x150.png"
|
2855 |
+
streaming_chatbot_description = """Hi,我是【飛特音速】, \n
|
2856 |
+
說話比較快,但有什麼問題都可以問我喔! \n
|
2857 |
+
🚀 我沒有預設問題、也沒有語音輸入,適合快問快答,一起練習問出好問題吧 \n
|
2858 |
+
🔠 擅長用文字表達的你,可以用鍵盤輸入你的問題,我會盡力回答你的問題喔\n
|
2859 |
+
💤 我還在成長,體力有限,每一次學習只能回答十個問題,請讓我休息一下再問問題喔~
|
2860 |
+
"""
|
2861 |
+
chatbot_open_ai_streaming_name = gr.State("chatbot_open_ai_streaming")
|
2862 |
+
gr.Image(value=streaming_chatbot_avatar_url, height=100, width=100, show_label=False, show_download_button=False)
|
2863 |
+
chatbot_open_ai_streaming_select_btn = gr.Button("👆選擇【飛特音速】", elem_id="streaming_chatbot_btn", visible=True, variant="primary")
|
2864 |
+
gr.Markdown(value=streaming_chatbot_description, visible=True)
|
2865 |
+
# 尚未開放
|
2866 |
+
with gr.Column(scale=1, variant="panel"):
|
2867 |
+
gr.Markdown(value="### 尚未開放", visible=True)
|
2868 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2869 |
with gr.Row("飛特音速") as chatbot_open_ai_streaming:
|
2870 |
with gr.Column():
|
2871 |
streaming_chat_greeting = """
|
|
|
2874 |
🔠 鍵盤輸入你的問題,我會盡力回答你的問題喔!\n
|
2875 |
💤 我還在成長,體力有限,每一次學習只能回答十個問題,請讓我休息一下再問問題喔!
|
2876 |
"""
|
2877 |
+
additional_inputs = [password, video_id, user_data, streaming_chat_thread_id_state, trascript_state, key_moments_state, content_subject_state, content_grade_state, ai_chatbot_socratic_mode_btn]
|
2878 |
streaming_chat = gr.ChatInterface(
|
2879 |
fn=chat_with_opan_ai_assistant_streaming,
|
2880 |
additional_inputs=additional_inputs,
|
|
|
2885 |
stop_btn=None,
|
2886 |
description=streaming_chat_greeting
|
2887 |
)
|
2888 |
+
with gr.Row("一般精靈") as chatbot_ai:
|
2889 |
with gr.Column():
|
2890 |
ai_chatbot_greeting = [[
|
2891 |
"請問你是誰?",
|
|
|
2896 |
💤 精靈們體力都有限,每一次學習只能回答十個問題,請讓我休息一下再問問題喔!
|
2897 |
""",
|
2898 |
]]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2899 |
with gr.Row():
|
2900 |
+
ai_chatbot = gr.Chatbot(label="ai_chatbot", show_share_button=False, likeable=True, show_label=False, latex_delimiters=latex_delimiters, value=ai_chatbot_greeting)
|
2901 |
+
with gr.Row():
|
2902 |
+
with gr.Accordion("你也有類似的問題想問嗎? 請按下 ◀︎", open=False) as ask_questions_accordion_2:
|
2903 |
ai_chatbot_question_1 = gr.Button("問題一")
|
2904 |
ai_chatbot_question_2 = gr.Button("問題一")
|
2905 |
ai_chatbot_question_3 = gr.Button("問題一")
|
2906 |
+
create_questions_btn = gr.Button("生成問題", variant="primary")
|
2907 |
ai_chatbot_audio_input = gr.Audio(sources=["microphone"], type="filepath", max_length=60, label="語音輸入")
|
2908 |
with gr.Row():
|
2909 |
ai_msg = gr.Textbox(label="訊息輸入",scale=3)
|
2910 |
+
ai_send_button = gr.Button("送出", variant="primary",scale=1)
|
2911 |
+
ai_send_feedback_btn = gr.Button("提問力回饋", variant="primary", scale=1, visible=False)
|
2912 |
with gr.Tab("文章模式"):
|
2913 |
with gr.Row():
|
2914 |
reading_passage = gr.Markdown(show_label=False, latex_delimiters = [{"left": "$", "right": "$", "display": False}])
|
|
|
3092 |
with gr.Tab("心智圖",elem_id="mind_map_tab"):
|
3093 |
mind_map_html = gr.HTML()
|
3094 |
|
|
|
|
|
|
|
3095 |
# OPEN AI CHATBOT SELECT
|
3096 |
+
chatbot_select_outputs=[
|
3097 |
+
chatbot_select_accordion,
|
3098 |
+
all_chatbot_select_btn,
|
3099 |
+
chatbot_open_ai_streaming,
|
3100 |
+
chatbot_ai,
|
3101 |
+
ai_name,
|
3102 |
+
ai_chatbot_ai_type,
|
3103 |
+
ai_chatbot_thread_id
|
3104 |
+
]
|
3105 |
+
# 聊天机器人的配置数据
|
3106 |
+
chatbots = [
|
3107 |
+
{
|
3108 |
+
"button": vaitor_chatbot_select_btn,
|
3109 |
+
"name_state": chatbot_open_ai_name,
|
3110 |
+
"avatar_images": vaitor_chatbot_avatar_images,
|
3111 |
+
"description_value": vaitor_chatbot_description_value,
|
3112 |
+
"chatbot_select_outputs": chatbot_select_outputs,
|
3113 |
+
"chatbot_output": ai_chatbot
|
3114 |
+
},
|
3115 |
+
{
|
3116 |
+
"button": foxcat_chatbot_select_btn,
|
3117 |
+
"name_state": foxcat_chatbot_name,
|
3118 |
+
"avatar_images": foxcat_avatar_images,
|
3119 |
+
"description_value": foxcat_chatbot_description_value,
|
3120 |
+
"chatbot_select_outputs": chatbot_select_outputs,
|
3121 |
+
"chatbot_output": ai_chatbot
|
3122 |
+
},
|
3123 |
+
{
|
3124 |
+
"button": lili_chatbot_select_btn,
|
3125 |
+
"name_state": lili_chatbot_name,
|
3126 |
+
"avatar_images": lili_avatar_images,
|
3127 |
+
"description_value": lili_chatbot_description_value,
|
3128 |
+
"chatbot_select_outputs": chatbot_select_outputs,
|
3129 |
+
"chatbot_output": ai_chatbot
|
3130 |
+
},
|
3131 |
+
{
|
3132 |
+
"button": maimai_chatbot_select_btn,
|
3133 |
+
"name_state": maimai_chatbot_name,
|
3134 |
+
"avatar_images": maimai_avatar_images,
|
3135 |
+
"description_value": maimai_chatbot_description_value,
|
3136 |
+
"chatbot_select_outputs": chatbot_select_outputs,
|
3137 |
+
"chatbot_output": ai_chatbot
|
3138 |
+
}
|
3139 |
+
]
|
3140 |
+
|
3141 |
+
def setup_chatbot_select_button(chatbot_dict):
|
3142 |
+
button = chatbot_dict["button"]
|
3143 |
+
chatbot_name_state = chatbot_dict["name_state"]
|
3144 |
+
avatar_images = chatbot_dict["avatar_images"]
|
3145 |
+
description_value = chatbot_dict["description_value"]
|
3146 |
+
chatbot_select_outputs = chatbot_dict["chatbot_select_outputs"]
|
3147 |
+
chatbot_output = chatbot_dict["chatbot_output"]
|
3148 |
+
button.click(
|
3149 |
+
chatbot_select, # 你可能需要修改这个函数以适应当前的逻辑
|
3150 |
+
inputs=[chatbot_name_state],
|
3151 |
+
outputs=chatbot_select_outputs
|
3152 |
+
).then(
|
3153 |
+
update_avatar_images,
|
3154 |
+
inputs=[avatar_images, description_value],
|
3155 |
+
outputs=[chatbot_output],
|
3156 |
+
scroll_to_output=True
|
3157 |
+
)
|
3158 |
+
|
3159 |
+
for chatbot_dict in chatbots:
|
3160 |
+
setup_chatbot_select_button(chatbot_dict)
|
3161 |
+
|
3162 |
+
# STREAMING CHATBOT SELECT
|
3163 |
chatbot_open_ai_streaming_select_btn.click(
|
3164 |
chatbot_select,
|
3165 |
inputs=[chatbot_open_ai_streaming_name],
|
|
|
3169 |
inputs=[],
|
3170 |
outputs=[streaming_chat_thread_id_state]
|
3171 |
)
|
3172 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3173 |
# ALL CHATBOT SELECT LIST
|
3174 |
all_chatbot_select_btn.click(
|
3175 |
show_all_chatbot_accordion,
|
3176 |
inputs=[],
|
3177 |
outputs=[chatbot_select_accordion, all_chatbot_select_btn]
|
3178 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3179 |
# OPENAI ASSISTANT CHATBOT 連接按鈕點擊事件
|
3180 |
def setup_question_button_click(button, inputs_list, outputs_list, chat_func, scroll_to_output=True):
|
3181 |
button.click(
|
|
|
3184 |
outputs=outputs_list,
|
3185 |
scroll_to_output=scroll_to_output
|
3186 |
)
|
3187 |
+
|
3188 |
+
# 其他精靈 ai_chatbot 模式
|
3189 |
+
ai_send_button.click(
|
3190 |
+
chat_with_any_ai,
|
3191 |
+
inputs=[ai_chatbot_ai_type, password, video_id, user_data, trascript_state, key_moments, ai_msg, ai_chatbot, content_subject, content_grade, questions_answers_json, ai_chatbot_socratic_mode_btn, ai_chatbot_thread_id, ai_name],
|
3192 |
+
outputs=[ai_msg, ai_chatbot, ai_send_button, ai_send_feedback_btn, ai_chatbot_thread_id],
|
3193 |
+
scroll_to_output=True
|
3194 |
+
)
|
3195 |
+
ai_send_feedback_btn.click(
|
3196 |
+
feedback_with_ai,
|
3197 |
+
inputs=[ai_chatbot_ai_type, ai_chatbot, ai_chatbot_thread_id],
|
3198 |
+
outputs=[ai_chatbot, ai_send_feedback_btn],
|
3199 |
+
scroll_to_output=True
|
3200 |
+
)
|
3201 |
+
# 其他精靈 ai_chatbot 连接 QA 按钮点击事件
|
3202 |
+
ai_chatbot_question_buttons = [ai_chatbot_question_1, ai_chatbot_question_2, ai_chatbot_question_3]
|
3203 |
+
for question_btn in ai_chatbot_question_buttons:
|
3204 |
+
inputs_list = [ai_chatbot_ai_type, password, video_id, user_data, trascript_state, key_moments, question_btn, ai_chatbot, content_subject, content_grade, questions_answers_json, ai_chatbot_socratic_mode_btn, ai_chatbot_thread_id, ai_name]
|
3205 |
+
outputs_list = [ai_msg, ai_chatbot, ai_send_button, ai_send_feedback_btn, ai_chatbot_thread_id]
|
3206 |
+
setup_question_button_click(question_btn, inputs_list, outputs_list, chat_with_any_ai)
|
3207 |
|
3208 |
# 為生成問題按鈕設定特殊的點擊事件
|
3209 |
+
question_buttons = [
|
3210 |
+
ai_chatbot_question_1,
|
3211 |
+
ai_chatbot_question_2,
|
3212 |
+
ai_chatbot_question_3
|
3213 |
+
]
|
3214 |
+
create_questions_btn.click(
|
3215 |
change_questions,
|
3216 |
inputs=[password, df_string_output],
|
3217 |
outputs=question_buttons
|
3218 |
)
|
3219 |
+
ai_chatbot_audio_input.change(
|
3220 |
+
process_open_ai_audio_to_chatbot,
|
3221 |
+
inputs=[password, ai_chatbot_audio_input],
|
3222 |
+
outputs=[ai_msg]
|
|
|
|
|
|
|
3223 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3224 |
|
3225 |
# 当输入 YouTube 链接时触发
|
3226 |
process_youtube_link_inputs = [password, youtube_link]
|
|
|
3256 |
trascript_state,
|
3257 |
key_moments_state,
|
3258 |
streaming_chat_thread_id_state,
|
|
|
|
|
|
|
3259 |
ai_chatbot_question_1,
|
3260 |
ai_chatbot_question_2,
|
3261 |
ai_chatbot_question_3
|
|
|
3587 |
exit_ticket_accordion,
|
3588 |
password,
|
3589 |
youtube_link,
|
|
|
3590 |
chatbot_open_ai_streaming,
|
3591 |
+
chatbot_ai,
|
3592 |
+
ai_chatbot_params,
|
3593 |
]
|
3594 |
demo.load(
|
3595 |
init_params,
|
chatbot.py
CHANGED
@@ -10,7 +10,7 @@ class Chatbot:
|
|
10 |
self.jutor_chat_key = config.get('jutor_chat_key')
|
11 |
self.transcript_text = self.get_transcript_text(config.get('transcript'))
|
12 |
self.key_moments_text = self.get_key_moments_text(config.get('key_moments'))
|
13 |
-
self.
|
14 |
self.ai_client = config.get('ai_client')
|
15 |
self.instructions = config.get('instructions')
|
16 |
|
@@ -39,15 +39,17 @@ class Chatbot:
|
|
39 |
return key_moments_text
|
40 |
|
41 |
|
42 |
-
def chat(self, user_message, chat_history
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
response_text = self.chat_with_service(service_type, system_prompt, messages)
|
48 |
-
|
49 |
-
|
50 |
-
|
|
|
|
|
51 |
|
52 |
def prepare_messages(self, chat_history, user_message):
|
53 |
messages = []
|
@@ -62,7 +64,7 @@ class Chatbot:
|
|
62 |
messages.append({"role": "assistant", "content": assistant_msg})
|
63 |
|
64 |
if user_message:
|
65 |
-
user_message += "/n (請一定要用繁體中文回答 zh-TW
|
66 |
messages.append({"role": "user", "content": user_message})
|
67 |
return messages
|
68 |
|
|
|
10 |
self.jutor_chat_key = config.get('jutor_chat_key')
|
11 |
self.transcript_text = self.get_transcript_text(config.get('transcript'))
|
12 |
self.key_moments_text = self.get_key_moments_text(config.get('key_moments'))
|
13 |
+
self.ai_model_name = config.get('ai_model_name')
|
14 |
self.ai_client = config.get('ai_client')
|
15 |
self.instructions = config.get('instructions')
|
16 |
|
|
|
39 |
return key_moments_text
|
40 |
|
41 |
|
42 |
+
def chat(self, user_message, chat_history):
|
43 |
+
try:
|
44 |
+
messages = self.prepare_messages(chat_history, user_message)
|
45 |
+
system_prompt = self.instructions
|
46 |
+
service_type = self.ai_model_name
|
47 |
response_text = self.chat_with_service(service_type, system_prompt, messages)
|
48 |
+
except Exception as e:
|
49 |
+
print(f"Error: {e}")
|
50 |
+
response_text = "學習精靈有點累,請稍後再試!"
|
51 |
+
|
52 |
+
return response_text
|
53 |
|
54 |
def prepare_messages(self, chat_history, user_message):
|
55 |
messages = []
|
|
|
64 |
messages.append({"role": "assistant", "content": assistant_msg})
|
65 |
|
66 |
if user_message:
|
67 |
+
user_message += "/n (請一定要用繁體中文回答 zh-TW,並用台灣人的禮貌口語表達,回答時不要特別說明這是台灣人的語氣,不要提到「台灣腔」,不用提到「逐字稿」這個詞,用「內容」代替),回答時如果有用到數學式,請用數學符號代替純文字(Latex 用 $ 字號 render)"
|
68 |
messages.append({"role": "user", "content": user_message})
|
69 |
return messages
|
70 |
|
local_config_example.json
CHANGED
@@ -1,12 +1,21 @@
|
|
1 |
{
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
{
|
2 |
+
"OUTPUT_PATH": "/Users/young/Downloads",
|
3 |
+
"TRANSCRIPTS": [],
|
4 |
+
"CURRENT_INDEX": 0,
|
5 |
+
"VIDEO_ID": "",
|
6 |
+
"PASSWORD": "6161",
|
7 |
+
"OPEN_AI_KEY": "sk-proj-xxxxxxxxxx",
|
8 |
+
"OPEN_AI_ASSISTANT_ID_GPT4_BOT1": "asst_3cxxxxxxxxxxjrQio9",
|
9 |
+
"OPEN_AI_ASSISTANT_ID_GPT3_BOT1": "asst_mcuxxxxxxxxxx5L4e",
|
10 |
+
"OPEN_AI_KEY_BOT2": "sk-proj-5HsYxxxxxxxxxxkH4pShu",
|
11 |
+
"OPEN_AI_ASSISTANT_ID_GPT4_BOT2": "asst_3xxxxxxxxxxrQio9",
|
12 |
+
"OPEN_AI_ASSISTANT_ID_GPT3_BOT2": "asst_mxxxxxxxxxx6vg5L4e",
|
13 |
+
"GROQ_API_KEY": "gsk_wcTFnH0eKexxxxxxxxxx4JrXz8IGLUXqrYji",
|
14 |
+
"JUTOR_CHAT_KEY": "b4c318bxxxxxxxxxx76b41fd27",
|
15 |
+
"GOOGLE_APPLICATION_CREDENTIALS_JSON": {
|
16 |
+
"xxxxxxxxxx": "xxxxxxxxx",
|
17 |
+
},
|
18 |
+
"AWS_ACCESS_KEY": "xxxxxxxxxx",
|
19 |
+
"AWS_SECRET_KEY": "xxxxxxxxxx",
|
20 |
+
"AWS_REGION_NAME": "us-west-2"
|
21 |
+
}
|