DeepLearning101 committed
Commit: 71e3d07
1 parent: 4a8e02f

Create app.py

Files changed (1)
  1. app.py  +87 -0
app.py ADDED
@@ -0,0 +1,87 @@
+ import gradio as gr
+ import aiohttp
+ import asyncio
+ import json, os
+
+ LLM_API = os.environ.get("LLM_API")
+ LLM_URL = os.environ.get("LLM_URL")
+
+ USER_ID = "HuggingFace Space"  # Placeholder user ID
+
+ async def send_chat_message(LLM_URL, LLM_API, user_input):
+     payload = {
+         "inputs": {},
+         "query": user_input,
+         "response_mode": "streaming",
+         "conversation_id": "",
+         "user": USER_ID,
+     }
+     print("Sending chat message payload:", payload)  # Debug information
+
+     async with aiohttp.ClientSession() as session:
+         async with session.post(
+             url=f"{LLM_URL}/chat-messages",
+             headers={"Authorization": f"Bearer {LLM_API}"},
+             json=payload,
+             timeout=aiohttp.ClientTimeout(total=60)
+         ) as response:
+             if response.status != 200:
+                 print(f"Error: {response.status}")
+                 return f"Error: {response.status}"
+
+             # Handle the stream of "data: ..." events
+             full_response = []
+             async for line in response.content:
+                 line = line.decode('utf-8').strip()
+                 if not line:
+                     continue
+                 if "data: " not in line:
+                     continue
+                 try:
+                     print("Received line:", line)  # Debug information
+                     data = json.loads(line.split("data: ")[1])
+                     if "answer" in data:
+                         full_response.append(data["answer"])
+                 except (IndexError, json.JSONDecodeError) as e:
+                     print(f"Error parsing line: {line}, error: {e}")  # Debug information
+                     continue
+
+             if full_response:
+                 return ''.join(full_response).strip()
+             else:
+                 return "Error: no answer found in the response"
+
+ async def handle_input(user_input):
+     chat_response = await send_chat_message(LLM_URL, LLM_API, user_input)
+     print("Chat response:", chat_response)  # Debug information
+     return chat_response
+
+ def run_sync(user_input):
+     return asyncio.run(handle_input(user_input))  # Run the async handler to completion for Gradio's sync callback
+
+ # Define Gradio interface
+ user_input = gr.Textbox(label='歡迎問我關於「高熵合金」(High-entropy alloys) 的各種疑難雜症')
+ examples = [
+     ["AlCoCrFeNi HEA coating 可用怎樣的實驗方法做到 ?"],
+     ["請問high entropy nitride coatings的形成,主要可透過那些元素來熱這個材料形成熱穩定?"]
+ ]
+
+ TITLE = """<h1 align="center">Large Language Model (LLM) Playground 💬 <a href='https://support.maicoin.com/zh-TW/support/home' target='_blank'>Cryptocurrency Exchange FAQ</a></h1>"""
+ SUBTITLE = """<h2 align="center"><a href='https://www.twman.org' target='_blank'>TonTon Huang Ph.D. @ 2024/06 </a><br></h2>"""
+ LINKS = """<a href='https://blog.twman.org/2021/04/ASR.html' target='_blank'>那些語音處理 (Speech Processing) 踩的坑</a> | <a href='https://blog.twman.org/2021/04/NLP.html' target='_blank'>那些自然語言處理 (Natural Language Processing, NLP) 踩的坑</a> | <a href='https://blog.twman.org/2024/02/asr-tts.html' target='_blank'>那些ASR和TTS可能會踩的坑</a> | <a href='https://blog.twman.org/2024/02/LLM.html' target='_blank'>那些大模型開發會踩的坑</a> | <a href='https://blog.twman.org/2023/04/GPT.html' target='_blank'>什麼是大語言模型,它是什麼?想要嗎?</a><br>
+ <a href='https://blog.twman.org/2023/07/wsl.html' target='_blank'>用PaddleOCR的PPOCRLabel來微調醫療診斷書和收據</a> | <a href='https://blog.twman.org/2023/07/HugIE.html' target='_blank'>基於機器閱讀理解和指令微調的統一信息抽取框架之診斷書醫囑資訊擷取分析</a><br>
+ <a href='https://huggingface.co/spaces/DeepLearning101/High-Entropy-Alloys-FAQ/blob/main/reference.txt' target='_blank'>「高熵合金」(High-entropy alloys) 參考論文</a><br>"""
+
+ with gr.Blocks() as iface:
+     gr.HTML(TITLE)
+     gr.HTML(SUBTITLE)
+     gr.HTML(LINKS)
+     gr.Interface(
+         fn=run_sync,
+         inputs=user_input,
+         outputs="text",
+         examples=examples,
+         allow_flagging="never"
+     )
+
+ iface.launch()
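
A quick way to smoke-test the streaming call outside the Gradio UI is sketched below. This is a minimal sketch and not part of the commit: it assumes LLM_URL and LLM_API are exported in the environment and that the backend exposes the same POST {LLM_URL}/chat-messages streaming interface that app.py targets; the filename, the question string, and the "local-test" user id are placeholders.

# smoke_test.py (hypothetical filename) -- minimal sketch, not part of this commit.
# Assumes LLM_URL and LLM_API are set and point at the same kind of
# POST {LLM_URL}/chat-messages streaming endpoint that app.py calls.
import asyncio
import json
import os

import aiohttp


async def ask(question: str) -> str:
    url = f"{os.environ['LLM_URL']}/chat-messages"
    headers = {"Authorization": f"Bearer {os.environ['LLM_API']}"}
    payload = {
        "inputs": {},
        "query": question,
        "response_mode": "streaming",
        "conversation_id": "",
        "user": "local-test",  # placeholder user id for this sketch
    }
    chunks = []
    async with aiohttp.ClientSession() as session:
        async with session.post(url, headers=headers, json=payload,
                                timeout=aiohttp.ClientTimeout(total=60)) as resp:
            resp.raise_for_status()  # fail fast on non-2xx instead of returning an error string
            async for raw in resp.content:
                line = raw.decode("utf-8").strip()
                if not line.startswith("data: "):
                    continue  # skip keep-alives and non-data lines
                try:
                    event = json.loads(line[len("data: "):])
                except json.JSONDecodeError:
                    continue  # ignore partial or malformed events
                chunks.append(event.get("answer", ""))
    return "".join(chunks).strip()


if __name__ == "__main__":
    print(asyncio.run(ask("What is a high-entropy alloy?")))

Running this directly prints the concatenated answer chunks, which mirrors what run_sync returns inside the Space, without starting the Gradio server.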