Spaces:
Sleeping
Sleeping
update
Browse files- .gitignore +2 -0
- Dockerfile +16 -0
- agent_example.py +103 -0
- note.md +64 -0
- proxyserver-fastapi.py +89 -0
- proxyserver-flask.py +97 -0
- push.sh +3 -0
- readme.md +102 -0
- requirements.txt +6 -0
.gitignore
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
.env
|
Dockerfile
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

FROM python:3.12

# Run as a non-root user (required by HF Spaces; UID 1000 is conventional).
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

# Install dependencies first so the layer is cached across code-only changes.
COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY --chown=user . /app
CMD ["uvicorn", "proxyserver-fastapi:app", "--host", "0.0.0.0", "--port", "7860"]
|
agent_example.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
import json
import os
from dotenv import load_dotenv

# Pull configuration from a .env file into the process environment.
load_dotenv()

# Address of the proxy server.
# Make sure the proxy (anyapi.py) is running on 127.0.0.1:7860.
BASE_URL = os.getenv("BASE_URL", "http://localhost:7860/v1/https/generativelanguage.googleapis.com/v1beta/openai")

# Entry point (path) of the target OpenAI-compatible API.
# e.g. for Zhipu AI's GLM models the API path may be
# https://open.bigmodel.cn/api/paas/v4/chat/completions;
# the proxy server splits it into protocol, domain and path.
ENTER_POINT = os.getenv("ENTER_POINT", "/chat/completions")

# Full proxied URL the client talks to.
API_URL = f"{BASE_URL}{ENTER_POINT}"

# Your API key.
# NOTE: never hard-code real keys in source; manage them via environment variables.
API_KEY = os.getenv("API_KEY", "YOUR_ACTUAL_API_KEY_HERE")
MODEL_NAME = os.getenv("MODEL_NAME", "gemini-2.5-flash-preview-05-20")
| 26 |
+
|
| 27 |
+
def call_openai_compatible_api(prompt_message: str):
    """Call an OpenAI-compatible chat-completion API through the proxy.

    Sends ``prompt_message`` as the user turn of a chat request to
    ``API_URL``.  When the payload requests streaming, SSE ``data:`` lines
    are parsed and printed as they arrive and the collected raw JSON is
    returned as ``{"raw_stream_data": ...}``; otherwise the decoded JSON
    body is returned.  Returns ``None`` when the request fails (errors are
    printed, not raised).

    Fixes vs. the original: the docstring was placed after two print
    statements (making it a dead string expression, not a docstring), and
    ``requests.post`` had no ``timeout`` — so the ``Timeout`` handler below
    was unreachable and a stalled proxy would hang this call forever.
    """
    print("正在调用 OpenAI 兼容 API...")
    print('prompt_message', prompt_message)

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {API_KEY}",
        # Ask for an uncompressed body so the SSE stream can be read line by line.
        "accept-encoding": "identity"
    }

    payload = {
        "model": f"{MODEL_NAME}",  # model to use, e.g. "gpt-3.5-turbo", "glm-4"
        "messages": [
            {"role": "system", "content": "你是一个有用的助手。"},
            {"role": "user", "content": prompt_message}
        ],
        "stream": True  # set to False for a single JSON response
    }

    print(f"正在向代理服务器发送请求: {API_URL}")
    print(f"请求头部: {headers}")
    print(f"请求体: {json.dumps(payload, indent=2, ensure_ascii=False)}")

    try:
        response = requests.post(
            API_URL,
            headers=headers,
            json=payload,
            stream=payload.get("stream", False),
            timeout=60,  # without this the Timeout except-branch can never fire
        )
        response.raise_for_status()  # raises HTTPError on 4xx/5xx status codes

        print("\n代理服务器响应:")
        if payload.get("stream"):
            full_response_content = ""
            for line in response.iter_lines():
                if not line:
                    continue  # skip SSE keep-alive blank lines
                decoded_line = line.decode('utf-8')
                if not decoded_line.startswith("data:"):
                    continue
                json_data = decoded_line[len("data:"):].strip()
                if json_data == "[DONE]":
                    break  # end-of-stream sentinel
                try:
                    parsed_json = json.loads(json_data)
                    # Print each streamed chunk as pretty JSON.
                    print(json.dumps(parsed_json, indent=2, ensure_ascii=False))
                    full_response_content += json_data  # collect raw JSON chunks
                except json.JSONDecodeError as e:
                    print(f"JSON 解析错误: {e} - {json_data}")
            print("\n--- 流式响应结束 ---")
            return {"raw_stream_data": full_response_content}
        else:
            print(json.dumps(response.json(), indent=2, ensure_ascii=False))
            return response.json()

    except requests.exceptions.HTTPError as http_err:
        # raise_for_status() only fires after a response exists, so
        # `response` is always bound here.
        print(f"HTTP 错误发生: {http_err}")
        print(f"响应状态码: {response.status_code}")
        print(f"响应体: {response.text}")
    except requests.exceptions.ConnectionError as conn_err:
        print(f"连接错误发生: {conn_err}")
        print("请确保代理服务器 (anyapi.py) 正在运行。")
    except requests.exceptions.Timeout as timeout_err:
        print(f"请求超时: {timeout_err}")
    except requests.exceptions.RequestException as req_err:
        print(f"发生未知错误: {req_err}")
|
| 92 |
+
|
| 93 |
+
if __name__ == "__main__":
    # Before running this script make sure that:
    #   1. the proxy server (anyapi.py) is running,
    #   2. API_KEY has been replaced with your real key,
    #   3. the requests library is installed (pip install requests).
    user_query = "你好!请介绍一下你自己。"
    call_openai_compatible_api(user_query)

    # Feel free to try other prompts:
    # call_openai_compatible_api("请给我讲一个关于人工智能的笑话。")
|
note.md
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
http://localhost:7860/v1/https/open.bigmodel.cn/api/paas/v4/chat/completions
|
| 2 |
+
{
|
| 3 |
+
"model": "glm-4-flash",
|
| 4 |
+
"messages": [
|
| 5 |
+
{"role": "system", "content": "你是一个有用的助手"},
|
| 6 |
+
{"role": "user", "content": "你好!请介绍一下自己。"}
|
| 7 |
+
],
|
| 8 |
+
"stream": true
|
| 9 |
+
}
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
http://127.0.0.1:7860/v1/https/generativelanguage.googleapis.com/v1beta/openai/chat/completions
|
| 14 |
+
{
|
| 15 |
+
"model":"gemini-2.5-flash-preview-05-20",
|
| 16 |
+
"messages":[
|
| 17 |
+
{"role": "system", "content": "你是一个有用的助手"},
|
| 18 |
+
{"role": "user", "content": "你好!请介绍一下自己。"}
|
| 19 |
+
],
|
| 20 |
+
"stream": true
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
curl -v --location 'http://127.0.0.1:7860/v1/https/generativelanguage.googleapis.com/v1beta/openai/chat/completions' \
|
| 25 |
+
--header 'Authorization: Bearer xxx' \
|
| 26 |
+
--header 'accept-encoding: identity' \
|
| 27 |
+
--header 'User-Agent: PostmanRuntime/7.45.0' \
|
| 28 |
+
--header 'Content-Type: application/json' \
|
| 29 |
+
--data '{
|
| 30 |
+
"model":"gemini-2.5-flash-preview-05-20",
|
| 31 |
+
"messages":[
|
| 32 |
+
{"role": "system", "content": "你是一个有用的助手"},
|
| 33 |
+
{"role": "user", "content": "你好!请介绍一下自己。"}
|
| 34 |
+
],
|
| 35 |
+
"stream": false
|
| 36 |
+
}'
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
转发程序为:@/proxyserver.py
|
| 41 |
+
|
| 42 |
+
假设数据为:
|
| 43 |
+
{
|
| 44 |
+
"model":"gemini-2.5-flash-preview-05-20",
|
| 45 |
+
"messages":[
|
| 46 |
+
{"role": "system", "content": "你是一个有用的助手"},
|
| 47 |
+
{"role": "user", "content": "你好!请介绍一下自己。"}
|
| 48 |
+
],
|
| 49 |
+
"stream": false
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
使用postman发送时没有问题
|
| 53 |
+
|
| 54 |
+
使用curl 发送时,出现问题,数据无法返回,proxyserver 的log 为:
|
| 55 |
+
```bash
|
| 56 |
+
代理请求到 https://generativelanguage.googleapis.com/v1beta/openai/chat/completions
|
| 57 |
+
/home/tanbushi/miniconda3/envs/any-api/lib/python3.11/site-packages/urllib3/connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings
|
| 58 |
+
warnings.warn(
|
| 59 |
+
目标API响应状态码: 200
|
| 60 |
+
目标API响应体: {"choices":[{"finish_reason":"stop","index":0,"message":{"content":"你好!很高兴能与你交流。\n\n我是一个由 Google 训练的**大型语言模型**。我的主要目标是协助你,提供信息,并进行各种基于语言的任务。\n\n我可以做很多事情,比如:\n* **回答你的问题**:只要是我的知识范围内,我会尽力给你准确、有用的答案。\n* **撰写不同类型的内容**:无论是文章、邮件、诗歌、故事、代码,还是仅仅是帮你整理思绪,我都可以尝试。\n* **进行翻译**:在不同的语言之间进行文本转换。\n* **提供创意灵感**:当你需要一些新的想法或突破思维定势时,我可以提供帮助。\n* **总结信息**:将长篇内容提炼成简明扼要的要点。\n* **学习新知识**:帮助你理解复杂的概念或探索新的领域。\n* **进行对话**:与你进行自然的交流,解答你的疑惑或只是聊聊天。\n\n我没有身体,也没有个人情感、意识或观点。我的存在是为了处理和生成文本,并以最有用的方式为你服务。\n\n如果你有任何问题或需要...
|
| 61 |
+
127.0.0.1 - - [03/Aug/2025 12:01:11] "POST /v1/https/generativelanguage.googleapis.com/v1beta/openai/chat/completions HTTP/1.1" 200 -
|
| 62 |
+
```
|
| 63 |
+
|
| 64 |
+
|
proxyserver-fastapi.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI, Request, HTTPException
|
| 2 |
+
from fastapi.responses import StreamingResponse, Response
|
| 3 |
+
import httpx
|
| 4 |
+
import uvicorn
|
| 5 |
+
import asyncio
|
| 6 |
+
|
| 7 |
+
app = FastAPI()


@app.api_route("/v1/{url_path:path}", methods=["GET", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"])
async def proxy_request(url_path: str, request: Request):
    """Forward /v1/{protocol}/{domain}/{path} to {protocol}://{domain}/{path}.

    The incoming method, headers (minus Host), query string and body are
    forwarded verbatim to the target; the upstream response body is sent
    back with its status code and headers (minus framing/hop-by-hop
    headers, see below).

    Raises:
        HTTPException(400): when the path lacks a protocol/domain separator.
        HTTPException(500): when the upstream request fails.
    """
    print(f"接收到的 url_path: {url_path}")

    # url_path example: https/open.bigmodel.cn/api/paas/v4/chat/completions
    # The first '/' separates protocol from domain.
    first_slash_idx = url_path.find('/')
    if first_slash_idx == -1:
        raise HTTPException(status_code=400, detail="无效的URL路径格式。期望协议/域名/路径。")

    protocol = url_path[:first_slash_idx]  # e.g. 'https'
    print(f"解析出的协议: {protocol}")

    # The second '/' separates the domain from the remaining path.
    second_slash_idx = url_path.find('/', first_slash_idx + 1)

    if second_slash_idx == -1:
        # Only protocol and domain were given, no trailing path.
        domain = url_path[first_slash_idx + 1:]
        remaining_path = ''
    else:
        domain = url_path[first_slash_idx + 1:second_slash_idx]
        remaining_path = url_path[second_slash_idx:]

    target_url = f"{protocol}://{domain}{remaining_path}"
    print(f"\n\n\n代理请求到 {target_url}")

    # Forward the original headers, dropping Host so it does not clash with
    # the target server's virtual-host routing.
    headers = {key: value for key, value in request.headers.items() if key.lower() != 'host'}

    # Raw request body and query parameters, forwarded as-is.
    request_body = await request.body()
    query_params = request.query_params

    async with httpx.AsyncClient(verify=True, follow_redirects=False) as client:
        try:
            resp = await client.request(
                method=request.method,
                url=target_url,
                headers=headers,
                content=request_body,  # pass the raw body through unchanged
                params=query_params,
                timeout=30.0,
            )

            # Debug: show what the upstream actually returned.
            print(f"目标API响应状态码: {resp.status_code}")
            print(f"目标API响应体: {resp.text[:500]}...")

            # FIX: besides content-encoding, the upstream Content-Length,
            # Transfer-Encoding and Connection headers must also be dropped.
            # httpx has already decoded the body and we re-frame it through a
            # StreamingResponse, so a stale Content-Length makes clients such
            # as curl hang waiting for bytes that never arrive (the exact
            # symptom logged in note.md: Postman works, curl gets nothing).
            excluded_headers = {'content-encoding', 'content-length',
                                'transfer-encoding', 'connection'}
            response_headers = {
                name: value for name, value in resp.headers.items()
                if name.lower() not in excluded_headers
            }

            # The body was fully read by client.request() above, so iterating
            # it here is safe even after the client context manager closes.
            async def generate_response():
                async for chunk in resp.aiter_bytes(chunk_size=8192):
                    yield chunk

            return StreamingResponse(generate_response(), status_code=resp.status_code, headers=response_headers)

        except httpx.RequestError as e:
            error_detail = f"代理请求到 {target_url} 失败: {type(e).__name__} - {e}"
            print(f"代理请求失败: {error_detail}")
            if e.request:
                print(f"请求信息: {e.request.method} {e.request.url}")
            if hasattr(e, 'response') and e.response:
                print(f"响应信息: {e.response.status_code} {e.response.text[:200]}...")
            raise HTTPException(status_code=500, detail=error_detail)
|
| 84 |
+
|
| 85 |
+
# if __name__ == '__main__':
|
| 86 |
+
# # 提示:请确保您已激活 conda 环境 'any-api' (conda activate any-api)
|
| 87 |
+
# # 提示:请确保已安装 FastAPI, Uvicorn 和 httpx 库 (pip install fastapi uvicorn httpx)
|
| 88 |
+
# print(f"代理服务器正在 0.0.0.0:7860 上启动")
|
| 89 |
+
# uvicorn.run(app, host="0.0.0.0", port=7860)
|
proxyserver-flask.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import threading
|
| 2 |
+
import requests
|
| 3 |
+
from flask import Flask, request, jsonify, abort, Response
|
| 4 |
+
|
| 5 |
+
class ProxyServer:
    """Flask-based HTTP proxy that forwards /v1/{protocol}/{domain}/{path}
    requests to {protocol}://{domain}/{path}."""

    def __init__(self, host='127.0.0.1', port=7860):
        # host/port the Flask development server will bind to.
        self.app = Flask(__name__)
        self.host = host
        self.port = port
        self._setup_routes()

    def _setup_routes(self):
        # Catch-all proxy route for every path under /v1/,
        # e.g. /v1/https/open.bigmodel.cn/api/paas/v4/chat/completions
        @self.app.route('/v1/<path:url_path>', methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'OPTIONS'])
        def proxy_request(url_path):
            print(f"接收到的 url_path: {url_path}")

            # url_path example: https/open.bigmodel.cn/api/paas/v4/chat/completions
            # The first '/' separates protocol from domain.
            first_slash_idx = url_path.find('/')
            if first_slash_idx == -1:
                abort(400, description="无效的URL路径格式。期望协议/域名/路径。")

            protocol = url_path[:first_slash_idx]  # e.g. 'https'
            print(f"解析出的协议: {protocol}")

            # The second '/' separates the domain from the remaining path.
            second_slash_idx = url_path.find('/', first_slash_idx + 1)

            if second_slash_idx == -1:
                # Only protocol and domain were given, no trailing path.
                domain = url_path[first_slash_idx + 1:]
                remaining_path = ''
            else:
                domain = url_path[first_slash_idx + 1:second_slash_idx]   # 'open.bigmodel.cn'
                remaining_path = url_path[second_slash_idx:]              # '/api/paas/v4/chat/completions'

            target_url = f"{protocol}://{domain}{remaining_path}"
            print(f"\n\n\n代理请求到 {target_url}")

            # Forward the original headers, dropping Host so it does not
            # clash with the target server's virtual-host routing.
            headers = {key: value for key, value in request.headers if key.lower() != 'host'}

            try:
                # Forward the original method, headers, body and query string.
                resp = requests.request(
                    method=request.method,
                    url=target_url,
                    headers=headers,
                    data=request.get_data(),
                    params=request.args,
                    allow_redirects=False,  # let the client see redirects itself
                    # FIX: the original comment claimed verification was
                    # disabled while the code passed True — TLS certificate
                    # verification IS (and should stay) enabled here.
                    verify=True,
                )

                # Debug: show what the upstream actually returned.
                print(f"目标API响应状态码: {resp.status_code}")
                print(f"目标API响应体: {resp.text[:500]}...")

                # FIX: drop framing/hop-by-hop headers, not just
                # content-encoding. The body is re-sent through a generator,
                # so the upstream Content-Length / Transfer-Encoding /
                # Connection values no longer describe what we emit and can
                # make clients such as curl hang waiting for more data (the
                # exact symptom documented in note.md).
                excluded_headers = ['content-encoding', 'content-length',
                                    'transfer-encoding', 'connection']
                response_headers = [
                    (name, value) for name, value in resp.headers.items()
                    if name.lower() not in excluded_headers
                ]

                # Stream the (already buffered) body back in chunks.
                def generate_response():
                    for chunk in resp.iter_content(chunk_size=8192):
                        yield chunk

                response = Response(generate_response(), status=resp.status_code, headers=response_headers)
                return response

            except requests.exceptions.RequestException as e:
                print(f"代理请求失败: {e}")
                abort(500, description=f"代理请求到 {target_url} 失败: {e}")

    def run(self):
        """Start the Flask development server."""
        print(f"代理服务器正在 {self.host}:{self.port} 上启动")
        # Flask's dev server handles each request in its own thread; use a
        # WSGI server such as Gunicorn or uWSGI in production.
        self.app.run(host=self.host, port=self.port, debug=True)
|
| 89 |
+
|
| 90 |
+
if __name__ == '__main__':
    # Prerequisites: activate the 'any-api' conda env (conda activate any-api)
    # and install Flask plus requests (pip install Flask requests).
    proxy = ProxyServer(host='0.0.0.0', port=7860)
    proxy.run()
|
push.sh
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Stage, commit and push everything in one step.
# Fix: stop on the first failure so we never `git push` after a failed
# commit (e.g. when there is nothing to commit).
set -e

git add .
git commit -m "update"
git push
|
readme.md
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AnyAPI 代理服务器
|
| 2 |
+
|
| 3 |
+
本项目旨在实现一个多线程的 Python 代理服务器,用于中转 HTTP 请求到各种兼容 OpenAI 接口的外部 API。它提供了一个统一的访问入口,并能够处理协议转换和简化客户端配置。
|
| 4 |
+
|
| 5 |
+
## 1. 项目简介
|
| 6 |
+
|
| 7 |
+
当用户访问中转主机 URL `http://127.0.0.1:7860/v1/{protocol}/{domain}/{path}` 时,代理服务器会将其解析并转发到实际的目标 URL `protocol://domain/path`。例如,`http://127.0.0.1:7860/v1/https/open.bigmodel.cn/api/paas/v4/chat/completions` 会被代理到 `https://open.bigmodel.cn/api/paas/v4/chat/completions`。
|
| 8 |
+
|
| 9 |
+
## 2. 项目结构
|
| 10 |
+
|
| 11 |
+
* `proxyserver-fastapi.py`: 基于 FastAPI 实现的代理服务器核心逻辑。它负责接收客户端请求,解析代理 URL,转发请求到目标 API,并将响应返回给客户端。支持多线程并发处理。
|
| 12 |
+
* `agent_example.py`: 一个 Python 客户端示例,演示如何通过代理服务器调用兼容 OpenAI 接口的 API。它从 `.env` 文件中读取配置信息。
|
| 13 |
+
* `requirements.txt`: 项目所需的 Python 依赖库列表。
|
| 14 |
+
* `.env`: 环境变量配置文件,用于 `agent_example.py` 的配置。
|
| 15 |
+
|
| 16 |
+
## 3. 技术栈
|
| 17 |
+
|
| 18 |
+
* **Python 3.12**
|
| 19 |
+
* **FastAPI**: 用于构建高性能的异步 API。
|
| 20 |
+
* **httpx**: 异步 HTTP 客户端,用于向目标 API 发送请求。
|
| 21 |
+
* **uvicorn**: ASGI 服务器,用于运行 FastAPI 应用。
|
| 22 |
+
* **requests**: (在 `agent_example.py` 中使用) 同步 HTTP 客户端。
|
| 23 |
+
* **python-dotenv**: 用于从 `.env` 文件加载环境变量。
|
| 24 |
+
* **Conda**: 环境管理工具。
|
| 25 |
+
|
| 26 |
+
## 4. 环境设置
|
| 27 |
+
|
| 28 |
+
1. **创建并激活 Conda 环境**:
|
| 29 |
+
```bash
|
| 30 |
+
conda create -n any-api python=3.12
|
| 31 |
+
conda activate any-api
|
| 32 |
+
```
|
| 33 |
+
|
| 34 |
+
2. **安装依赖**:
|
| 35 |
+
```bash
|
| 36 |
+
pip install -r requirements.txt
|
| 37 |
+
```
|
| 38 |
+
|
| 39 |
+
## 5. 配置说明
|
| 40 |
+
|
| 41 |
+
`agent_example.py` 通过 `.env` 文件读取配置。请确保 `.env` 文件存在于项目根目录,并包含以下变量:
|
| 42 |
+
|
| 43 |
+
```dotenv
|
| 44 |
+
# 代理服务器的基地址
|
| 45 |
+
# 示例: http://localhost:7860/v1/https/generativelanguage.googleapis.com/v1beta/openai
|
| 46 |
+
BASE_URL="http://localhost:7860/v1/https/generativelanguage.googleapis.com/v1beta/openai"
|
| 47 |
+
|
| 48 |
+
# 目标 API 的入口点(相对于目标域名的路径)
|
| 49 |
+
# 示例: /chat/completions
|
| 50 |
+
ENTER_POINT="/chat/completions"
|
| 51 |
+
|
| 52 |
+
# 您的 API 密钥
|
| 53 |
+
API_KEY="YOUR_ACTUAL_API_KEY_HERE"
|
| 54 |
+
|
| 55 |
+
# 使用的模型名称
|
| 56 |
+
MODEL_NAME="gemini-2.5-flash-preview-05-20"
|
| 57 |
+
```
|
| 58 |
+
|
| 59 |
+
请根据您的实际需求修改这些值。
|
| 60 |
+
|
| 61 |
+
## 6. 如何运行
|
| 62 |
+
|
| 63 |
+
### 6.1 启动代理服务器
|
| 64 |
+
|
| 65 |
+
在激活 `any-api` Conda 环境后,运行 `proxyserver-fastapi.py`:
|
| 66 |
+
|
| 67 |
+
```bash
|
| 68 |
+
uvicorn proxyserver-fastapi:app --host 0.0.0.0 --port 7860 --reload
|
| 69 |
+
```
|
| 70 |
+
或者直接运行文件(如果文件中包含 `if __name__ == '__main__':` 块):
|
| 71 |
+
```bash
|
| 72 |
+
python proxyserver-fastapi.py
|
| 73 |
+
```
|
| 74 |
+
服务器将在 `http://0.0.0.0:7860` 上监听请求。
|
| 75 |
+
|
| 76 |
+
### 6.2 运行客户端示例
|
| 77 |
+
|
| 78 |
+
在代理服务器运行后,您可以在另一个终端中运行 `agent_example.py` 来测试代理功能:
|
| 79 |
+
|
| 80 |
+
```bash
|
| 81 |
+
python agent_example.py
|
| 82 |
+
```
|
| 83 |
+
|
| 84 |
+
这将向代理服务器发送一个示例请求,并通过代理转发到配置的目标 API。
|
| 85 |
+
|
| 86 |
+
## 7. URL 格式说明
|
| 87 |
+
|
| 88 |
+
代理服务器期望的 URL 格式为:`http://<proxy_host>:<proxy_port>/v1/{protocol}/{domain}/{path}`
|
| 89 |
+
|
| 90 |
+
* `<proxy_host>:<proxy_port>`: 代理服务器的地址和端口,例如 `127.0.0.1:7860`。
|
| 91 |
+
* `v1`: 版本标识符。
|
| 92 |
+
* `{protocol}`: 目标 API 的协议(例如 `http` 或 `https`)。
|
| 93 |
+
* `{domain}`: 目标 API 的域名(例如 `open.bigmodel.cn`)。
|
| 94 |
+
* `{path}`: 目标 API 的具体路径(例如 `/api/paas/v4/chat/completions`)。
|
| 95 |
+
|
| 96 |
+
代理服务器会将此 URL 转换为 `protocol://domain/path` 并进行转发。
|
| 97 |
+
|
| 98 |
+
## 8. 注意事项
|
| 99 |
+
|
| 100 |
+
* **API 密钥安全**: 永远不要将 API 密钥硬编码到代码中。本项目使用 `.env` 文件进行管理,请确保 `.env` 文件不被提交到版本控制系统(已在 `.gitignore` 中配置)。
|
| 101 |
+
* **错误处理**: 代理服务器包含了基本的错误处理机制,但生产环境中可能需要更健壮的日志记录和监控。
|
| 102 |
+
* **性能**: FastAPI 和 httpx 提供了异步能力,有助于提高并发性能。Python GIL 对 CPU 密集型任务有影响,但对于 I/O 密集型代理任务影响较小。
|
requirements.txt
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
flask
|
| 2 |
+
httpx
|
| 3 |
+
fastapi
|
| 4 |
+
uvicorn
|
| 5 |
+
requests
|
| 6 |
+
python-dotenv
|