# llm-api / app.py — Gradio chat demo (Hugging Face Space "start3406/llm-api", commit b04f0ed)
import gradio as gr
import os
from openai import OpenAI
# Provider credentials come from the environment (set as Space secrets).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")

# One client per provider. DeepSeek exposes an OpenAI-compatible API, so the
# same SDK is reused with a different base URL (the /v1 suffix matches the
# current DeepSeek docs; adjust if their path changes).
openai_client = OpenAI(api_key=OPENAI_API_KEY)
deepseek_client = OpenAI(api_key=DEEPSEEK_API_KEY, base_url="https://api.deepseek.com/v1")
def generate_response(model_provider, prompt, temperature, top_p, max_tokens, repetition_penalty):
    """Send a single-turn chat prompt to the selected provider and return the reply.

    Args:
        model_provider: "DeepSeek" or "OpenAI" (value of the provider dropdown).
        prompt: User message text.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling cutoff.
        max_tokens: Maximum tokens to generate.
        repetition_penalty: Mapped onto the OpenAI-style ``frequency_penalty``
            parameter (``presence_penalty`` is pinned to 0.0).

    Returns:
        The model's reply text, or a human-readable error string on failure
        (the Gradio callback must never raise, or the UI shows a generic error).
    """
    # Provider -> model id. Clients are resolved only after validation so an
    # unexpected dropdown value cannot raise an uncaught KeyError.
    model_ids = {
        "DeepSeek": "deepseek-chat",
        "OpenAI": "gpt-3.5-turbo",
    }
    if model_provider not in model_ids:
        return f"Unknown model provider: {model_provider}"
    client = deepseek_client if model_provider == "DeepSeek" else openai_client
    model = model_ids[model_provider]
    try:
        resp = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            # repetition_penalty is exposed through frequency_penalty; pick
            # presence_penalty instead if that better matches the use case.
            frequency_penalty=repetition_penalty,
            presence_penalty=0.0
        )
        # message.content may be None (e.g. refusals / tool-call responses);
        # guard it so .strip() cannot raise AttributeError.
        content = resp.choices[0].message.content
        return (content or "").strip()
    except Exception as e:
        # Surface the failure in the output textbox instead of crashing.
        return f"{model_provider} API Error: {e}"
# UI layout. Component creation order inside each Row determines on-screen
# order, and the `inputs=` list below must stay aligned with the positional
# parameters of generate_response.
with gr.Blocks(theme=gr.themes.Soft()) as iface:
    gr.Markdown("## 🧠 DeepSeek / OpenAI 聊天演示(可调参)")
    # Row 1: provider selection plus the two core sampling knobs.
    with gr.Row():
        provider = gr.Dropdown(
            choices=["DeepSeek", "OpenAI"],
            value="DeepSeek",
            label="模型供应商"
        )
        temperature = gr.Slider(
            minimum=0.1, maximum=1.5, step=0.1, value=0.7,
            label="Temperature"
        )
        top_p = gr.Slider(
            minimum=0.1, maximum=1.0, step=0.05, value=0.9,
            label="Top-p"
        )
    prompt = gr.Textbox(
        label="Prompt",
        lines=6,
        placeholder="在这里输入你的问题……"
    )
    # Row 2: generation-length and repetition controls. The "Frequency
    # Penalty" slider feeds generate_response's repetition_penalty parameter,
    # which is forwarded as frequency_penalty to the API.
    with gr.Row():
        max_tokens = gr.Slider(
            minimum=32, maximum=2048, step=32, value=512,
            label="Max Tokens"
        )
        rep_penalty = gr.Slider(
            minimum=0.0, maximum=2.0, step=0.1, value=1.1,
            label="Frequency Penalty"
        )
    output = gr.Textbox(label="Response")
    btn = gr.Button("生成回答")
    # Wire the button: input order must match generate_response's signature
    # (model_provider, prompt, temperature, top_p, max_tokens, repetition_penalty).
    btn.click(
        fn=generate_response,
        inputs=[provider, prompt, temperature, top_p, max_tokens, rep_penalty],
        outputs=output
    )
# Start the Gradio server (blocking call; on Spaces this serves the app).
iface.launch()