#!/usr/bin/env python3
"""
๊ณต๊ฐ ๋ชจ๋ธ์ ์ฌ์ฉํ๋ ๋์ ์ฝ๋
"""
import gradio as gr
import requests
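# Both helpers below call the Hugging Face Inference API directly with requests;
# they expect the JSON response to be a list of {"generated_text": ...} dicts.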
def chat_with_public_model(message, history):
    """Chat with a public model."""
    try:
        # Use a public model (no token required)
        api_url = "https://api-inference.huggingface.co/models/microsoft/DialoGPT-medium"
        data = {
            "inputs": message,
            "parameters": {
                "max_new_tokens": 100,
                "temperature": 0.7,
                "do_sample": True
            }
        }
        response = requests.post(api_url, json=data, timeout=30)
        if response.status_code == 200:
            result = response.json()
            if isinstance(result, list) and len(result) > 0:
                return result[0].get("generated_text", "Sorry, I could not generate a response.")
            else:
                return "Sorry, I could not generate a response."
        else:
            return f"API call failed: {response.status_code}"
    except Exception as e:
        return f"An error occurred: {str(e)}"
def solve_math_with_public_model(problem):
    """Solve a math problem with a public model."""
    try:
        # Prompt for solving the math problem
        prompt = f"Please solve the following math problem: {problem}"
        # Use a public model (no token required)
        api_url = "https://api-inference.huggingface.co/models/gpt2"
        data = {
            "inputs": prompt,
            "parameters": {
                "max_new_tokens": 150,
                "temperature": 0.3,
                "do_sample": True
            }
        }
        response = requests.post(api_url, json=data, timeout=30)
        if response.status_code == 200:
            result = response.json()
            if isinstance(result, list) and len(result) > 0:
                return result[0].get("generated_text", "Sorry, I could not solve the math problem.")
            else:
                return "Sorry, I could not solve the math problem."
        else:
            return f"API call failed: {response.status_code}"
    except Exception as e:
        return f"An error occurred: {str(e)}"
# Create the Gradio interface
with gr.Blocks(title="Lily Math RAG System (Public Model)", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🧮 Lily Math RAG System (Public Model)")
    gr.Markdown("An AI system for solving math problems. (Uses public models.)")

    with gr.Tabs():
        # Chat tab
        with gr.Tab("💬 Chat"):
            chatbot = gr.Chatbot(height=400)
            msg = gr.Textbox(
                label="Enter your message",
                placeholder="Hello! Please help me with a math problem.",
                lines=2
            )
            clear = gr.Button("Clear conversation")

            def respond(message, chat_history):
                bot_message = chat_with_public_model(message, chat_history)
                chat_history.append((message, bot_message))
                return "", chat_history

            msg.submit(respond, [msg, chatbot], [msg, chatbot])
            clear.click(lambda: None, None, chatbot, queue=False)
        # Math problem solving tab
        with gr.Tab("🧮 Math Problem Solving"):
            with gr.Row():
                with gr.Column():
                    math_input = gr.Textbox(
                        label="Math problem",
                        placeholder="e.g. 2x + 5 = 13",
                        lines=3
                    )
                    solve_btn = gr.Button("Solve", variant="primary")
                with gr.Column():
                    math_output = gr.Textbox(
                        label="Solution",
                        lines=8,
                        interactive=False
                    )
            solve_btn.click(solve_math_with_public_model, math_input, math_output)
        # Settings tab
        with gr.Tab("⚙️ Settings"):
            gr.Markdown("## System Info")
            gr.Markdown("**Model**: public models (no token required)")
            gr.Markdown("**Status**: ✅ Running")
            gr.Markdown("**Version**: 1.0.0 (public model)")
if __name__ == "__main__":
    demo.launch()