"""Appigence OpenAI API adapter.

A FastAPI service exposing OpenAI-compatible endpoints (/v1/models,
/v1/chat/completions) backed by the Appigence chat API.
"""

import asyncio
import json
import os
import time
import uuid
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, Dict, List, Optional

import httpx
import uvicorn
from fastapi import Depends, FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from pydantic import BaseModel, Field


class ChatMessage(BaseModel):
    """A chat message in OpenAI format."""
    role: str
    content: str


class ChatCompletionRequest(BaseModel):
    """A chat completion request in OpenAI format."""
    model: str
    messages: List[ChatMessage]
    stream: bool = False
    # temperature and max_tokens are accepted for OpenAI compatibility
    # but are not forwarded to the backend (see _convert_request below).
    temperature: Optional[float] = 0.7
    max_tokens: Optional[int] = None


class StreamChoice(BaseModel):
    """A single choice within a streaming response chunk."""
    delta: Dict[str, Any] = Field(default_factory=dict)
    index: int = 0
    finish_reason: Optional[str] = None


class StreamResponse(BaseModel):
    """A streaming response chunk in OpenAI format."""
    id: str = Field(default_factory=lambda: f"chatcmpl-{uuid.uuid4().hex}")
    object: str = "chat.completion.chunk"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: List[StreamChoice]


class ModelInfo(BaseModel):
    """Metadata describing an available model."""
    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "appigence"


class HealthCheck(BaseModel):
    """Health check response."""
    status: str
    timestamp: int
    version: str = "1.0.0"
    models_available: List[str]


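# For reference, a serialized StreamResponse chunk looks like this
# (field values illustrative):
#   {"id": "chatcmpl-abc123", "object": "chat.completion.chunk",
#    "created": 1700000000, "model": "gpt-4o",
#    "choices": [{"delta": {"content": "Hi"}, "index": 0, "finish_reason": null}]}

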
class HTTPClientManager:
    """HTTP client manager - singleton-style shared client (DRY)."""

    def __init__(self):
        self.client: Optional[httpx.AsyncClient] = None
        # Cap the number of concurrent upstream requests.
        self.semaphore = asyncio.Semaphore(50)
        # Guard lazy initialization against concurrent first calls.
        self._lock = asyncio.Lock()

    async def get_client(self) -> httpx.AsyncClient:
        """Return the shared HTTP client, creating it on first use."""
        if self.client is None:
            async with self._lock:
                if self.client is None:
                    self.client = httpx.AsyncClient(
                        timeout=httpx.Timeout(300.0),
                        limits=httpx.Limits(
                            max_keepalive_connections=20,
                            max_connections=100
                        )
                    )
        return self.client

    async def close(self):
        """Close the shared HTTP client."""
        if self.client:
            await self.client.aclose()
            self.client = None


http_manager = HTTPClientManager()


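# Note: the semaphore above caps this service at 50 in-flight upstream
# requests; if the backend tolerates more parallelism, raise it together
# with httpx's max_connections limit.

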
class AppigenceHandler:
    """
    Appigence API handler.

    Encapsulates all interaction logic with the Appigence API.
    """

    def __init__(self):
        # Map OpenAI model names onto the backend models that serve them.
        self.model_mapping = {
            "gpt-4": "gpt-4o",
            "gpt-4-turbo": "gpt-4o",
            "gpt-3.5-turbo": "gpt-4o-mini",
        }
        self.supported_models = [
            "gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo",
            "gpt-4", "gpt-4-turbo"
        ]
        self.api_url = "https://api.appigence.com/chat"
        self.headers = {
            "Host": "api.appigence.com",
            "Content-Type": "application/json",
            "Connection": "keep-alive",
            "Accept": "*/*",
            "User-Agent": "ChatWise/1.2.16 CFNetwork/1410.0.3 Darwin/22.6.0",
            "Accept-Language": "zh-CN,zh-Hans;q=0.9",
            "Accept-Encoding": "gzip, deflate, br"
        }

    def get_supported_models(self) -> List[str]:
        """Return the list of supported model IDs."""
        return self.supported_models

    def get_model_info(self, model_id: str) -> ModelInfo:
        """Return metadata for a model."""
        return ModelInfo(id=model_id, owned_by="appigence")

    def _convert_request(self, request: ChatCompletionRequest) -> Dict[str, Any]:
        """Convert an OpenAI-format request into Appigence format (DRY)."""
        conversation = []
        for msg in request.messages:
            # Downgrade system messages to user turns for the backend payload.
            role = "user" if msg.role == "system" else msg.role
            conversation.append({
                "content": msg.content,
                "role": role
            })

        model_name = self.model_mapping.get(request.model, request.model)

        return {
            "isPremium": True,
            "modelName": model_name,
            "userConversation": conversation
        }

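    # Illustration (hypothetical values): an OpenAI-style request with
    # model="gpt-4" and messages=[{"role": "system", "content": "Be brief."}]
    # converts to:
    #   {"isPremium": True, "modelName": "gpt-4o",
    #    "userConversation": [{"content": "Be brief.", "role": "user"}]}
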
    def _parse_sse_line(self, line: str) -> Optional[Dict[str, Any]]:
        """Parse one SSE data line into a dict, or return None."""
        if not line:
            return None
        # SSE payloads may arrive prefixed with "data: "; strip it if present.
        if line.startswith("data:"):
            line = line[len("data:"):].strip()
        if not line or line == "[DONE]":
            return None
        try:
            return json.loads(line)
        except json.JSONDecodeError:
            return None

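    # Examples of lines this parser is intended to tolerate (illustrative):
    # 'data: {"choices": []}' and a bare '{"choices": []}' both yield a dict,
    # while blank lines, "data: [DONE]", and malformed JSON yield None.
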
    def _extract_content_delta(self, data: Dict[str, Any]) -> Optional[str]:
        """Extract the incremental content from a chunk, if any."""
        try:
            choices = data.get("choices", [])
            if choices:
                delta = choices[0].get("delta", {})
                return delta.get("content", "")
        except (KeyError, IndexError, TypeError):
            pass
        return None

    def _is_finished(self, data: Dict[str, Any]) -> bool:
        """Check whether the stream has finished."""
        try:
            choices = data.get("choices", [])
            if choices:
                return choices[0].get("finish_reason") == "stop"
        except (KeyError, IndexError, TypeError):
            pass
        return False

    async def handle_stream_request(
        self,
        request: ChatCompletionRequest
    ) -> AsyncGenerator[str, None]:
        """Handle a streaming request; designed for high concurrency."""
        appigence_request = self._convert_request(request)

        stream_id = f"chatcmpl-{uuid.uuid4().hex}"
        created_time = int(time.time())

        # Announce the assistant role before any content chunks.
        role_chunk = StreamResponse(
            id=stream_id,
            created=created_time,
            model=request.model,
            choices=[StreamChoice(delta={"role": "assistant"})]
        )
        yield f"data: {role_chunk.json()}\n\n"

        async with http_manager.semaphore:
            try:
                client = await http_manager.get_client()
                async with client.stream(
                    "POST",
                    self.api_url,
                    json=appigence_request,
                    headers=self.headers
                ) as response:
                    response.raise_for_status()

                    async for line in response.aiter_lines():
                        if not line:
                            continue

                        data = self._parse_sse_line(line)
                        if not data:
                            continue

                        content_delta = self._extract_content_delta(data)
                        if content_delta:
                            delta_response = StreamResponse(
                                id=stream_id,
                                created=created_time,
                                model=request.model,
                                choices=[StreamChoice(delta={"content": content_delta})]
                            )
                            yield f"data: {delta_response.json()}\n\n"

                        if self._is_finished(data):
                            break

                # Emit the terminating chunk and the [DONE] sentinel.
                finish_response = StreamResponse(
                    id=stream_id,
                    created=created_time,
                    model=request.model,
                    choices=[StreamChoice(delta={}, finish_reason="stop")]
                )
                yield f"data: {finish_response.json()}\n\n"
                yield "data: [DONE]\n\n"

            except httpx.HTTPStatusError as e:
                error_msg = f"Appigence API error: {e.response.status_code}"
                yield f"data: {json.dumps({'error': error_msg})}\n\n"
            except Exception as e:
                error_msg = f"Internal error: {str(e)}"
                yield f"data: {json.dumps({'error': error_msg})}\n\n"

    async def handle_non_stream_request(
        self,
        request: ChatCompletionRequest
    ) -> Dict[str, Any]:
        """Handle a non-streaming request by collecting the full stream."""
        appigence_request = self._convert_request(request)
        content_pieces = []

        async with http_manager.semaphore:
            client = await http_manager.get_client()
            async with client.stream(
                "POST",
                self.api_url,
                json=appigence_request,
                headers=self.headers
            ) as response:
                response.raise_for_status()

                async for line in response.aiter_lines():
                    if not line:
                        continue

                    data = self._parse_sse_line(line)
                    if not data:
                        continue

                    content_delta = self._extract_content_delta(data)
                    if content_delta:
                        content_pieces.append(content_delta)

                    if self._is_finished(data):
                        break

        content = "".join(content_pieces)

        # Usage accounting is not available from this flow, so zeros are returned.
        return {
            "id": f"chatcmpl-{uuid.uuid4().hex}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": request.model,
            "choices": [{
                "message": {"role": "assistant", "content": content},
                "index": 0,
                "finish_reason": "stop"
            }],
            "usage": {
                "prompt_tokens": 0,
                "completion_tokens": 0,
                "total_tokens": 0
            }
        }


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan management (YAGNI: nothing to set up on startup)."""
    yield
    # Shutdown: release the shared HTTP client.
    await http_manager.close()


app = FastAPI(
    title="Appigence OpenAI API Adapter",
    description="High-performance Appigence API adapter supporting OpenAI-format calls",
    version="1.0.0",
    lifespan=lifespan
)


app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

handler = AppigenceHandler()

security = HTTPBearer(auto_error=False)


async def get_api_key(credentials: Optional[HTTPAuthorizationCredentials] = Depends(security)):
    """Optional API key validation: enforced only when API_KEY is set."""
    required_key = os.getenv("API_KEY")
    if required_key:
        if not credentials or credentials.credentials != required_key:
            raise HTTPException(status_code=401, detail="Invalid API key")
    return credentials


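# Example: start the server with `API_KEY=secret python appigence_api.py`;
# clients must then send an `Authorization: Bearer secret` header. With the
# variable unset, requests are accepted without credentials.

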
@app.get("/", response_model=Dict[str, str]) |
|
|
async def root(): |
|
|
"""根端点 - 遵循KISS原则""" |
|
|
return { |
|
|
"message": "Appigence OpenAI API Adapter", |
|
|
"version": "1.0.0", |
|
|
"docs": "/docs" |
|
|
} |
|
|
|
|
|
|
|
|
@app.get("/health", response_model=HealthCheck) |
|
|
async def health_check(): |
|
|
"""健康检查端点""" |
|
|
return HealthCheck( |
|
|
status="healthy", |
|
|
timestamp=int(time.time()), |
|
|
models_available=handler.get_supported_models() |
|
|
) |
|
|
|
|
|
|
|
|
@app.get("/v1/models") |
|
|
async def list_models(api_key: Optional[HTTPAuthorizationCredentials] = Depends(get_api_key)): |
|
|
"""列出所有可用模型""" |
|
|
models = [ |
|
|
handler.get_model_info(model_id).dict() |
|
|
for model_id in handler.get_supported_models() |
|
|
] |
|
|
return {"object": "list", "data": models} |
|
|
|
|
|
|
|
|
@app.post("/v1/chat/completions") |
|
|
async def chat_completions( |
|
|
request: ChatCompletionRequest, |
|
|
background_tasks: BackgroundTasks, |
|
|
api_key: Optional[HTTPAuthorizationCredentials] = Depends(get_api_key) |
|
|
): |
|
|
""" |
|
|
处理聊天完成请求 - 统一入口点 |
|
|
支持流式和非流式响应 |
|
|
""" |
|
|
if not request.messages: |
|
|
raise HTTPException(status_code=400, detail="Messages required") |
|
|
|
|
|
|
|
|
if request.model not in handler.get_supported_models(): |
|
|
raise HTTPException( |
|
|
status_code=400, |
|
|
detail=f"Unsupported model: {request.model}. Supported models: {handler.get_supported_models()}" |
|
|
) |
|
|
|
|
|
try: |
|
|
if request.stream: |
|
|
|
|
|
return StreamingResponse( |
|
|
handler.handle_stream_request(request), |
|
|
media_type="text/event-stream", |
|
|
headers={ |
|
|
"Cache-Control": "no-cache", |
|
|
"Connection": "keep-alive", |
|
|
"X-Accel-Buffering": "no" |
|
|
} |
|
|
) |
|
|
else: |
|
|
|
|
|
response = await handler.handle_non_stream_request(request) |
|
|
return response |
|
|
|
|
|
except httpx.HTTPStatusError as e: |
|
|
raise HTTPException( |
|
|
status_code=e.response.status_code, |
|
|
detail=f"Backend API error: {e.response.text}" |
|
|
) |
|
|
except Exception as e: |
|
|
raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}") |
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    port = int(os.getenv("PORT", 7860))

    print(f"🚀 Starting Appigence API Adapter on port {port}")
    print(f"📚 API Documentation: http://localhost:{port}/docs")
    print(f"❤️ Health Check: http://localhost:{port}/health")

    uvicorn.run(
        "appigence_api:app",
        host="0.0.0.0",
        port=port,
        workers=1,
        log_level="info",
        access_log=True
    )
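
# --- Example client usage (sketch; assumes the adapter is running locally on
# the default port 7860 and the official `openai` Python SDK v1+ is installed;
# any placeholder api_key works while the API_KEY env var is unset) ---
#
#   from openai import OpenAI
#   client = OpenAI(base_url="http://localhost:7860/v1", api_key="unused")
#   resp = client.chat.completions.create(
#       model="gpt-4o-mini",
#       messages=[{"role": "user", "content": "Hello"}],
#   )
#   print(resp.choices[0].message.content)
#
# Or stream with curl:
#
#   curl -N http://localhost:7860/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "gpt-4o", "stream": true,
#          "messages": [{"role": "user", "content": "Hi"}]}'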