# NOTE: the three lines below were page-scrape residue from the Hugging Face
# Spaces status banner ("Spaces: Runtime error"); kept as a comment so the
# file remains valid Python.
from openai import OpenAI | |
import gradio as gr | |
import time | |
import tiktoken | |
def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
    """Return the number of tokens used by a list of chat messages.

    Port of the OpenAI cookbook token-counting recipe.

    Args:
        messages: A list of ``{"role": ..., "content": ...}`` dicts, or a
            plain string (treated as one assistant message) so the function
            can be wired directly to a Gradio textbox value.
        model: Model name used to pick the tiktoken encoding.

    Returns:
        int: Estimated prompt token count, including the 3-token reply primer.
    """
    # Generalization: accept a bare string so UI callbacks can pass textbox
    # contents directly (the original crashed on `.items()` in that case).
    if isinstance(messages, str):
        messages = [{"role": "assistant", "content": messages}]
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model: fall back to the encoding used by current chat models.
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    # Per-message overhead differs between model snapshots.
    tokens_per_message = 3
    tokens_per_name = 1
    if model == "gpt-3.5-turbo-0301":
        tokens_per_message = 4  # every message: {role/name}\n{content}\n
        tokens_per_name = -1    # if there's a name, the role is omitted
    elif "gpt-3.5-turbo" in model:
        print("Warning: gpt-3.5-turbo may update over time. Using default for gpt-3.5-turbo-0613.")
    elif "gpt-4" in model:
        print("Warning: gpt-4 may update over time. Using default for gpt-4-0613.")
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with assistant
    return num_tokens
def update_textbox(endpoint, transmit_api_key, official_api_key):
    """Select the API key that matches the chosen endpoint.

    Raises gr.Error when no endpoint is selected or the matching key has
    not been validated yet; otherwise returns the key string.
    """
    if endpoint == "https://lmzh.top/v1":
        selected_key = transmit_api_key
        missing_msg = '你选择的是中转接口,请在 key 验证中先验证中转 API Key'
    elif endpoint == "https://api.openai.com/v1":
        selected_key = official_api_key
        missing_msg = '你选择的是官方接口请在 key 验证中先验证官方 API Key'
    else:
        raise gr.Error('未选择端点')
    if not selected_key:
        raise gr.Error(missing_msg)
    return selected_key
def generate_response(question, model_name, endpoint, transmit_api_key, official_api_key):
    """Stream a chat-completion answer character by character.

    Args:
        question: User question, sent as a single user message.
        model_name: Chat model to request.
        endpoint: Base URL (relay endpoint or official OpenAI API).
        transmit_api_key: Relay key (used when the relay endpoint is chosen).
        official_api_key: Official key (used when the official endpoint is chosen).

    Yields:
        str: The accumulated answer text, growing one character per yield,
        or a failure message if the request errors out.

    Raises:
        gr.Error: Propagated from update_textbox on a bad endpoint/key combo.
    """
    # Picks the key matching the endpoint; raises gr.Error if invalid, which
    # also covers the empty-endpoint case the original re-checked afterwards.
    api_key = update_textbox(endpoint, transmit_api_key, official_api_key)
    client = OpenAI(api_key=api_key, base_url=endpoint)
    messages = [{"role": "user", "content": question}]
    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=messages,
            max_tokens=1000,
            temperature=0.1,
            stream=True,
        )
        result = ""
        for chunk in response:
            content = chunk.choices[0].delta.content
            if content is None:
                continue
            for char in content:
                result += char
                yield result
                # Longer pause after sentence punctuation for a typing effect.
                if char in ",。!?":
                    time.sleep(0.2)
                else:
                    time.sleep(0.05)
    except Exception as e:
        # BUG FIX: the original used `return f"..."` inside a generator, which
        # hides the message in StopIteration — Gradio never displays it.
        # Yield it instead so the user actually sees the failure.
        yield f"响应生成失败: {str(e)}"
# Selectable API base URLs: the relay (中转) endpoint and the official one.
base_url_options = [
    "https://lmzh.top/v1",
    "https://api.openai.com/v1",
]
# Chat models offered in the model dropdown.
model_options = [
    "gpt-3.5-turbo",
    "gpt-4o-mini",
    "gpt-4o",
    "gpt-4o-2024-08-06",
]
def clear_output():
    """Return two empty strings, used to reset a pair of Gradio components."""
    return ("", "")
def validate_tran_api_key(api_key):
    """Validate a relay (中转) API key: list its models and probe its quota.

    Returns a human-readable status string; raises gr.Error on empty input.
    """
    if api_key == '':
        raise gr.Error('请输入您的中转API Key')
    try:
        client = OpenAI(api_key=api_key, base_url='https://lmzh.top/v1')
        models_list = [model.id for model in client.models.list().data]
        # Minimal chat request to see whether the key still has quota.
        try:
            client.chat.completions.create(
                model="gpt-4o",
                messages=[{"role": "user", "content": "This is a test"}],
                max_tokens=5,
            )
            quota_status = "Quota 容量经检测可用."
        except Exception:
            quota_status = "Quota 容量已经耗尽或发生未知错误"
        return f"经检查API-key有效.\n{quota_status}\n可访问Models列表:\n{', '.join(models_list)}"
    except Exception as e:
        return f"Error: {str(e)}"
def validate_offi_api_key(api_key):
    """Validate an official OpenAI API key: list its models and probe its quota.

    Returns a human-readable status string; raises gr.Error on empty input.
    """
    if api_key == '':
        raise gr.Error('请输入您的官方API Key')
    try:
        client = OpenAI(api_key=api_key)
        models_list = [model.id for model in client.models.list().data]
        # Minimal chat request to see whether the key still has quota.
        try:
            client.chat.completions.create(
                model="gpt-4o",  # any accessible model works for the probe
                messages=[{"role": "user", "content": "This is a test"}],
                max_tokens=5,
            )
            quota_status = "Quota 容量经检测可用."
        except Exception:
            quota_status = "Quota 容量已经耗尽或发生未知错误."
        return f"经检查官方API-key有效 .\n{quota_status}\n可访问Models列表:\n{', '.join(models_list)}"
    except Exception as e:
        return f"Error: {str(e)}"
# def update_api_key(endpoint, transmit_api_key, official_api_key): | |
# if transmit_api_key == '' or official_api_key =='': | |
# raise gr.Error('请在key验证中先验证api key') | |
# if "transmit" in endpoint: # 检查 endpoint 是否包含 "transmit" | |
# return transmit_api_key if transmit_api_key else "请填写您的中转API Key" | |
# else: # 否则认为是官方端点 | |
# return official_api_key if official_api_key else "请填写您的官方API Key" | |
# ---------------- Gradio front-end ----------------
app = gr.Blocks(css="style.css", title="sipole_verify_key")
with app:
    # Page header: title plus purchase / usage links.
    gr.HTML('''
<div class="header">
    <div class="header-title">API-KEY验证</div>
    <div class="header-subtitle">
        中转key购买地址 <a href="https://buy.sipola.cn" target="_blank">here</a>,ai文案生成可使用中转key,请访问 <a href="https://ai.sipola.cn" target="_blank">here</a>
    </div>
</div>
''')
    # --- Row 1: relay (中转) key validation ---
    with gr.Row(variant='panel'):
        with gr.Column():
            transmit_api_key = gr.Textbox(type='password', label='中转API-Key', placeholder='请在此填写您的中转API Key', elem_classes="custom-textbox")
            with gr.Row(variant='panel'):
                transmit_btn_text = gr.Button("点击验证中转key", variant="primary", elem_classes="custom-button")
                transmit_clear_btn = gr.Button("清除", variant="primary", elem_classes="custom-button")
        with gr.Column():
            transmit_output_textbox = gr.Textbox(label="显示key状态及可访问的模型列表", lines=5, max_lines=5, elem_classes="custom-textbox")
        transmit_btn_text.click(validate_tran_api_key, transmit_api_key, transmit_output_textbox)
        transmit_clear_btn.click(clear_output, outputs=[transmit_api_key, transmit_output_textbox], show_progress=False)
    # --- Row 2: official key validation ---
    with gr.Row(variant='panel'):
        with gr.Column():
            official_api_key = gr.Textbox(type='password', label='官方API Key', placeholder='请在此输入你的官方API Key', elem_classes="custom-textbox")
            with gr.Row(variant='panel'):
                official_btn_text = gr.Button("点击验证官方key", variant="primary", elem_classes="custom-button")
                official_clear_btn = gr.Button("清除", variant="primary", elem_classes="custom-button")
        with gr.Column():
            official_output_textbox = gr.Textbox(label="显示key状态及可访问的模型列表", lines=5, max_lines=5, elem_classes="custom-textbox")
        official_btn_text.click(validate_offi_api_key, official_api_key, official_output_textbox)
        official_clear_btn.click(clear_output, outputs=[official_api_key, official_output_textbox], show_progress=False)
    # --- Row 3: question / answer streaming ---
    with gr.Row(variant='panel'):
        with gr.Column():
            question = gr.Textbox(label="请输入你的问题: ", lines=2, max_lines=3)
            model_name = gr.Dropdown(label="选择模型", choices=model_options, value="gpt-3.5-turbo")
            endpoint = gr.Dropdown(label="选择官方或者接口地址", choices=base_url_options, value='https://lmzh.top/v1')
            with gr.Row(variant='panel'):
                message_btn = gr.Button("发送", variant="primary", elem_classes="custom-button")
                qa_clear_btn = gr.Button("清除", variant="primary", elem_classes="custom-button")
        with gr.Column():
            output_textbox = gr.Textbox(label="显示问题答案", lines=15, max_lines=15, elem_classes="custom-textbox", interactive=True, show_label=True, show_copy_button=True)
            output_textbox2 = gr.Textbox(label="显示tokens数", lines=1, max_lines=2, elem_classes="custom-textbox", interactive=False, show_label=True)
        message_btn.click(generate_response, inputs=[question, model_name, endpoint, transmit_api_key, official_api_key], outputs=output_textbox, show_progress=False)
        qa_clear_btn.click(clear_output, outputs=[question, output_textbox], show_progress=False)
        # BUG FIX: the original referenced the undefined name `output_texbox`
        # (typo -> NameError at import time) and passed the raw textbox string
        # to num_tokens_from_messages, which expects message dicts. Wrap the
        # text into a single message before counting tokens.
        output_textbox.change(
            fn=lambda text: num_tokens_from_messages([{"role": "assistant", "content": text}]),
            inputs=output_textbox,
            outputs=output_textbox2,
        )
    # Page footer.
    gr.HTML('''
<div class="footer">
    <center><p>Power by sipola </p></center>
</div>
''')
app.queue()
app.launch(show_error=True)