import gradio as gr
import os
from openai import OpenAI

# Read API keys from environment variables (keys are never hard-coded).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")

# Both providers are reached through the OpenAI-compatible client;
# DeepSeek only needs a different base_url.
openai_client = OpenAI(api_key=OPENAI_API_KEY)
deepseek_client = OpenAI(
    api_key=DEEPSEEK_API_KEY,
    base_url="https://api.deepseek.com/v1"
)
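
# Minimal environment setup sketch (assumed shell commands; adjust to your shell).
# The variable names match the os.getenv() calls above:
#   export OPENAI_API_KEY="sk-..."
#   export DEEPSEEK_API_KEY="sk-..."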


def generate_response(model_provider, prompt, temperature, top_p, max_tokens, repetition_penalty):
    # Map the UI choice to a (client, model name) pair.
    clients = {
        "DeepSeek": (deepseek_client, "deepseek-chat"),
        "OpenAI": (openai_client, "gpt-3.5-turbo")
    }
    client, model = clients[model_provider]

    try:
        resp = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            # The repetition-penalty slider is passed through as frequency_penalty,
            # since the chat completions API exposes frequency/presence penalties
            # rather than a repetition_penalty parameter.
            frequency_penalty=repetition_penalty,
            presence_penalty=0.0
        )
        return resp.choices[0].message.content.strip()
    except Exception as e:
        # Surface API errors in the UI instead of crashing the app.
        return f"{model_provider} API Error: {e}"


# Gradio UI: provider selector, sampling controls, prompt box, and output field.
with gr.Blocks(theme=gr.themes.Soft()) as iface:
    gr.Markdown("## 🧠 DeepSeek / OpenAI Chat Demo (Adjustable Parameters)")
    with gr.Row():
        provider = gr.Dropdown(
            choices=["DeepSeek", "OpenAI"],
            value="DeepSeek",
            label="Model Provider"
        )
        temperature = gr.Slider(
            minimum=0.1, maximum=1.5, step=0.1, value=0.7,
            label="Temperature"
        )
        top_p = gr.Slider(
            minimum=0.1, maximum=1.0, step=0.05, value=0.9,
            label="Top-p"
        )
    prompt = gr.Textbox(
        label="Prompt",
        lines=6,
        placeholder="Type your question here..."
    )
    with gr.Row():
        max_tokens = gr.Slider(
            minimum=32, maximum=2048, step=32, value=512,
            label="Max Tokens"
        )
        rep_penalty = gr.Slider(
            minimum=0.0, maximum=2.0, step=0.1, value=1.1,
            label="Frequency Penalty"
        )
    output = gr.Textbox(label="Response")
    btn = gr.Button("Generate Response")
    # Input order must match the parameter order of generate_response.
    btn.click(
        fn=generate_response,
        inputs=[provider, prompt, temperature, top_p, max_tokens, rep_penalty],
        outputs=output
    )

iface.launch()
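
# To run the demo (assumed commands; the script filename is illustrative):
#   pip install gradio openai
#   python app.py
# Requires the openai>=1.0 client (for the OpenAI class) and a recent gradio release.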