# Example client for testing an OpenAI-compatible API behind the anyapi.py proxy.
import requests
import json
import os
from dotenv import load_dotenv

# Load environment variables from a .env file, if one is present.
load_dotenv()

# Address of the proxy server.
# Make sure anyapi.py is running on 127.0.0.1:7860.
BASE_URL = os.getenv("BASE_URL", "http://localhost:7860/v1/https/generativelanguage.googleapis.com/v1beta/openai")

# Endpoint path of the target OpenAI-compatible API.
# For example, for Zhipu AI's GLM models the API path may be
# https://open.bigmodel.cn/api/paas/v4/chat/completions;
# the proxy resolves it into protocol, domain and path.
ENTER_POINT = os.getenv("ENTER_POINT", "/chat/completions")

# Full proxied request URL.
API_URL = f"{BASE_URL}{ENTER_POINT}"

# Your API key.
# NOTE: never hard-code API keys in real applications; manage them
# securely, e.g. via environment variables.
API_KEY = os.getenv("API_KEY", "YOUR_ACTUAL_API_KEY_HERE")

# Model to request from the backend.
MODEL_NAME = os.getenv("MODEL_NAME", "gemini-2.5-flash-preview-05-20")
def call_openai_compatible_api(prompt_message: str):
    """Send *prompt_message* to an OpenAI-compatible API through the proxy.

    Builds a chat-completion payload, POSTs it to ``API_URL`` and prints
    the response. When streaming is enabled, SSE ``data:`` lines are
    parsed and printed as they arrive.

    Args:
        prompt_message: The user message to send to the model.

    Returns:
        The parsed JSON response (non-streaming), or a dict with key
        ``"raw_stream_data"`` holding the concatenated raw JSON chunks
        (streaming). Implicitly returns ``None`` when a request error
        was caught and reported.
    """
    # NOTE(review): the original placed this string after the prints,
    # so it was a no-op expression rather than a docstring — moved up.
    print("正在调用 OpenAI 兼容 API...")
    print('prompt_message', prompt_message)

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {API_KEY}",
        # Request an uncompressed body so SSE lines can be read directly.
        "accept-encoding": "identity"
    }
    payload = {
        "model": f"{MODEL_NAME}",  # model name, e.g. "gpt-3.5-turbo", "glm-4"
        "messages": [
            {"role": "system", "content": "你是一个有用的助手。"},
            {"role": "user", "content": prompt_message}
        ],
        "stream": True  # set to True for a streamed (SSE) response
    }

    print(f"正在向代理服务器发送请求: {API_URL}")
    print(f"请求头部: {headers}")
    print(f"请求体: {json.dumps(payload, indent=2, ensure_ascii=False)}")

    try:
        # BUGFIX: a timeout is required — without one requests can hang
        # forever and the Timeout handler below could never fire.
        response = requests.post(
            API_URL,
            headers=headers,
            json=payload,
            stream=payload.get("stream", False),
            timeout=(10, 300),  # (connect, read) seconds
        )
        response.raise_for_status()  # raise HTTPError on 4xx/5xx status codes

        print("\n代理服务器响应:")
        if payload.get("stream"):
            full_response_content = ""
            try:
                for line in response.iter_lines():
                    if not line:
                        continue  # skip SSE keep-alive blank lines
                    decoded_line = line.decode('utf-8')
                    if not decoded_line.startswith("data:"):
                        continue
                    json_data = decoded_line[len("data:"):].strip()
                    if json_data == "[DONE]":
                        break  # end-of-stream sentinel
                    try:
                        parsed_json = json.loads(json_data)
                        # Print each streamed chunk as pretty JSON.
                        print(json.dumps(parsed_json, indent=2, ensure_ascii=False))
                        full_response_content += json_data  # collect raw JSON chunks
                    except json.JSONDecodeError as e:
                        print(f"JSON 解析错误: {e} - {json_data}")
            finally:
                # BUGFIX: release the streaming connection back to the pool.
                response.close()
            print("\n--- 流式响应结束 ---")
            return {"raw_stream_data": full_response_content}
        else:
            print(json.dumps(response.json(), indent=2, ensure_ascii=False))
            return response.json()
    except requests.exceptions.HTTPError as http_err:
        print(f"HTTP 错误发生: {http_err}")
        print(f"响应状态码: {response.status_code}")
        print(f"响应体: {response.text}")
    except requests.exceptions.ConnectionError as conn_err:
        print(f"连接错误发生: {conn_err}")
        print("请确保代理服务器 (anyapi.py) 正在运行。")
    except requests.exceptions.Timeout as timeout_err:
        print(f"请求超时: {timeout_err}")
    except requests.exceptions.RequestException as req_err:
        print(f"发生未知错误: {req_err}")
| if __name__ == "__main__": | |
| # 在运行此脚本之前,请确保: | |
| # 1. anyapi.py 代理服务器正在运行。 | |
| # 2. 您已将 API_KEY 替换为您的实际 API 密钥。 | |
| # 3. 您已安装 requests 库 (pip install requests)。 | |
| user_query = "你好!请介绍一下你自己。" | |
| call_openai_compatible_api(user_query) | |
| # 您可以尝试不同的查询 | |
| # call_openai_compatible_api("请给我讲一个关于人工智能的笑话。") | |