import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT
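# NOTE (assumption, not in the original file): prompts/initial_prompt.py and
# prompts/main_prompt.py are expected to define the string constants
# INITIAL_PROMPT and MAIN_PROMPT that are imported above and used below.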
# Load OPENAI_API_KEY from the .env file (if present)
if os.path.exists(".env"):
    load_dotenv(".env")

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)
def gpt_call(history, user_message,
             model="gpt-4o-mini",
             max_tokens=512,
             temperature=0.7,
             top_p=0.95):
    """
    Generate a reply via the OpenAI chat completions API.
    - history: list of (user_text, assistant_text) tuples
    - user_message: the message the user just typed
    """
    # 1) Put the system message (MAIN_PROMPT) at the very front
    messages = [{"role": "system", "content": MAIN_PROMPT}]

    # 2) Convert the existing conversation history to the OpenAI format:
    #    user_text -> 'user' / assistant_text -> 'assistant'
    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})

    # 3) Append the user's latest input last
    messages.append({"role": "user", "content": user_message})

    # 4) Call the OpenAI API
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p
    )
    return completion.choices[0].message.content
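
# Illustrative usage (not part of the original app): gpt_call takes prior turns
# as (user, assistant) tuples plus the new user message, e.g.
#   gpt_call([("Hi", "Hello! How can I help?")], "Explain top_p in one line.")
# and returns the assistant's reply string.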
def respond(user_message, history):
    """
    Called when the user submits input in the Gradio UI.
    - user_message: the message the user just sent
    - history: existing list of (user, assistant) tuples
    """
    # If the user sent an empty string, do nothing
    if not user_message:
        return "", history

    # Get a reply from the GPT model
    assistant_reply = gpt_call(history, user_message)

    # Append the (user, assistant) pair to the history
    history.append((user_message, assistant_reply))

    # Gradio expects (cleared input box, updated history) in return
    return "", history
##############################
# Gradio Blocks UI
##############################
with gr.Blocks() as demo:
    gr.Markdown("## Simple Chat Interface")

    # Set the chatbot's initial state.
    # The first message is the tuple (user="", assistant=INITIAL_PROMPT),
    # so on screen the 'assistant' appears to speak INITIAL_PROMPT first.
    chatbot = gr.Chatbot(
        value=[("", INITIAL_PROMPT)],  # (user, assistant)
        height=500
    )

    # History state holding the (user, assistant) pairs;
    # seeded with the same initial state as the chatbot
    state_history = gr.State([("", INITIAL_PROMPT)])

    # User input box
    user_input = gr.Textbox(
        placeholder="Type your message here...",
        label="Your Input"
    )

    # On submit, call respond() -> outputs are (cleared input box, updated chatbot)
    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot]
    ).then(
        # After respond finishes, copy the latest history into state_history
        fn=lambda _, h: h,
        inputs=[user_input, chatbot],
        outputs=[state_history]
    )
# Main entry point
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)