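"""Gradio app for the "Social Media Trends" Hugging Face Space.

It forwards a user query to a remote streaming chat endpoint
(``{LLM_URL}/chat-messages``, authenticated with ``LLM_API``) and displays the
concatenated answer. The request shape resembles a Dify-style chat-messages
API; that is an inference from the payload fields, not something stated in
this file.
"""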
import os
import gradio as gr
import aiohttp
import asyncio
import json
import urllib.parse
import traceback

LLM_API = os.environ.get("LLM_API")  # API token for the chat backend
LLM_URL = os.environ.get("LLM_URL")  # Base URL of the chat backend (joined with /chat-messages below)
USER_ID = "HuggingFace Space"  # User identifier attached to every request

async def send_chat_message(LLM_URL, LLM_API, user_input):
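    """POST the user query to {LLM_URL}/chat-messages and collect the streamed answer.

    The payload fields (inputs / query / response_mode / conversation_id / user)
    match a Dify-style chat API; this is an assumption based on the request shape.
    Returns the concatenated answer text, or an "Error:" / "Exception:" string on failure.
    """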
    payload = {
        "inputs": {},
        "query": user_input,
        "response_mode": "streaming",
        "conversation_id": "",
        "user": USER_ID,
    }
    print("Sending chat message payload:", payload)

    async with aiohttp.ClientSession() as session:
        try:
            async with session.post(
                url=f"{LLM_URL}/chat-messages",
                headers={"Authorization": f"Bearer {LLM_API}"},
                json=payload,
                timeout=aiohttp.ClientTimeout(total=180)
            ) as response:
                if response.status != 200:
                    print(f"Error: {response.status}")
                    return f"Error: Status code {response.status}"

                full_response = []
                # Read the stream line by line: each server-sent event arrives as a
                # "data: {...}" line. Fixed-size chunks can split or merge events,
                # so line-based iteration is used instead.
                async for raw_line in response.content:
                    line = raw_line.decode('utf-8').strip()
                    if not line or not line.startswith("data: "):
                        continue
                    try:
                        data = json.loads(line[len("data: "):])
                        if "answer" in data:
                            decoded_answer = urllib.parse.unquote(data["answer"])
                            full_response.append(decoded_answer)
                    except json.JSONDecodeError as e:
                        print(f"Skipping invalid line: {line}, error: {e}")
                        continue

                if full_response:
                    return ''.join(full_response).strip()
                else:
                    return "Error: No response found in the response"
        except aiohttp.ClientConnectorError:
            return "Error: Cannot connect to the API server. Please check the URL and server status."
        except Exception as e:
            print("Exception occurred in send_chat_message:")
            print(traceback.format_exc())
            return f"Exception: {e}"

async def handle_input(user_input):
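    """Bridge between the Gradio callback and send_chat_message."""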
    print(f"Handling input: {user_input}")
    chat_response = await send_chat_message(LLM_URL, LLM_API, user_input)
    print("Chat response:", chat_response)
    return chat_response

def run_sync(func, *args):
    # Run an async function from Gradio's synchronous callback thread.
    # asyncio.run() creates a fresh event loop for each call and closes it
    # once the coroutine finishes.
    return asyncio.run(func(*args))
    
# Define the Gradio interface
user_input = gr.Textbox(label='請輸入您想查詢的關鍵公司名稱')
examples = [
    ["加密貨幣"],
    # ["國泰金控"],
    ["中華電信"],
    # ["台灣大哥大"],
    ["台積電"],
    # ["BlockTempo"]
]




TITLE = """<h1>Social Media Trends 💬 分析社群相關資訊,並判斷其正、負、中立等評價及趨勢 (數據大會跑很久或失敗) </h1>"""
SUBTITLE = """<h2><a href='https://www.twman.org' target='_blank'>TonTon Huang Ph.D.</a> | <a href='https://blog.twman.org/p/deeplearning101.html' target='_blank'>手把手帶你一起踩AI坑</a><br></h2>"""
LINKS = """
<a href='https://github.com/Deep-Learning-101' target='_blank'>Deep Learning 101 Github</a> | <a href='http://deeplearning101.twman.org' target='_blank'>Deep Learning 101</a> | <a href='https://www.facebook.com/groups/525579498272187/' target='_blank'>台灣人工智慧社團 FB</a> | <a href='https://www.youtube.com/c/DeepLearning101' target='_blank'>YouTube</a><br>
<a href='https://blog.twman.org/2025/03/AIAgent.html' target='_blank'>那些 AI Agent 要踩的坑</a>:探討多種 AI 代理人工具的應用經驗與挑戰,分享實用經驗與工具推薦。<br>
<a href='https://blog.twman.org/2024/08/LLM.html' target='_blank'>白話文手把手帶你科普 GenAI</a>:淺顯介紹生成式人工智慧核心概念,強調硬體資源和數據的重要性。<br>
<a href='https://blog.twman.org/2024/09/LLM.html' target='_blank'>大型語言模型直接就打完收工?</a>:回顧 LLM 領域探索歷程,討論硬體升級對 AI 開發的重要性。<br>
<a href='https://blog.twman.org/2024/07/RAG.html' target='_blank'>那些檢索增強生成要踩的坑</a>:探討 RAG 技術應用與挑戰,提供實用經驗分享和工具建議。<br>
<a href='https://blog.twman.org/2024/02/LLM.html' target='_blank'>那些大型語言模型要踩的坑</a>:探討多種 LLM 工具的應用與挑戰,強調硬體資源的重要性。<br>
<a href='https://blog.twman.org/2023/04/GPT.html' target='_blank'>Large Language Model,LLM</a>:探討 LLM 的發展與應用,強調硬體資源在開發中的關鍵作用。<br>
<a href='https://blog.twman.org/2024/11/diffusion.html' target='_blank'>ComfyUI + Stable Diffusion</a>:深入探討影像生成與分割技術的應用,強調硬體資源的重要性。<br>
<a href='https://blog.twman.org/2024/02/asr-tts.html' target='_blank'>那些ASR和TTS可能會踩的坑</a>:探討 ASR 和 TTS 技術應用中的問題,強調數據質量的重要性。<br>
<a href='https://blog.twman.org/2021/04/NLP.html' target='_blank'>那些自然語言處理 (Natural Language Processing, NLP) 踩的坑</a>:分享 NLP 領域的實踐經驗,強調數據質量對模型效果的影響。<br>
<a href='https://blog.twman.org/2021/04/ASR.html' target='_blank'>那些語音處理 (Speech Processing) 踩的坑</a>:分享語音處理領域的實務經驗,強調資料品質對模型效果的影響。<br>
<a href='https://blog.twman.org/2023/07/wsl.html' target='_blank'>用PPOCRLabel來幫PaddleOCR做OCR的微調和標註</a><br>
<a href='https://blog.twman.org/2023/07/HugIE.html' target='_blank'>基於機器閱讀理解和指令微調的統一信息抽取框架之診斷書醫囑資訊擷取分析</a><br>
"""

# Assemble the page content with Gradio Blocks
with gr.Blocks() as iface:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    gr.HTML(LINKS)
    gr.Interface(
        fn=lambda x: run_sync(handle_input, x),
        inputs=user_input,
        outputs="text",
        examples=examples,
        flagging_mode="never"  # Disable flagging
    )

iface.launch()
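# launch() with its defaults is typically sufficient on Hugging Face Spaces; the
# platform supplies the host/port, so no extra arguments should be needed here.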