import warnings
warnings.filterwarnings('ignore')
import json
import os
import argparse
import requests
import time
import functools
import asyncio
# Import aiohttp at the top of the file so every worker process has it in scope
import aiohttp
from fastapi import FastAPI, Request, Form, File, UploadFile, Depends, HTTPException, BackgroundTasks, Security, status
from fastapi.responses import StreamingResponse, HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
from pydantic import BaseModel
from typing import List, Optional, Dict, Any
import uvicorn
from sse_starlette.sse import EventSourceResponse
from contextlib import asynccontextmanager
from fastapi.security.api_key import APIKeyHeader

# Request throttle shared by all endpoints: at most 200 requests in flight per process.
request_semaphore = asyncio.Semaphore(200)  # allow up to 200 concurrent requests

# Module-level aiohttp connection pool. Created in lifespan() at startup and
# closed at shutdown; None until the application has started.
aiohttp_session = None

# Application lifespan: open shared resources at startup, release them at shutdown.
@asynccontextmanager
async def lifespan(app):
    """Create the shared aiohttp client session on startup and close it on shutdown."""
    global aiohttp_session
    connector = aiohttp.TCPConnector(
        limit=200,           # overall connection cap
        limit_per_host=100,  # per-host connection cap
    )
    aiohttp_session = aiohttp.ClientSession(connector=connector)
    # Startup message
    print("初始化高性能服务器配置...")
    yield
    # Shutdown: release the pooled connections.
    print("关闭并清理资源...")
    if aiohttp_session:
        await aiohttp_session.close()

# Command-line argument parsing.
# NOTE(review): parse_args() runs at import time; with workers > 1 each uvicorn
# worker re-imports this module and re-parses sys.argv — confirm workers see the
# same argv as the parent process.
parser = argparse.ArgumentParser()
parser.add_argument("--base_model", type=str, default=None, help="基础模型路径")
parser.add_argument("--adapter_path", type=str, required=True, help="LoRA适配器路径")
parser.add_argument("--port", type=int, default=7860, help="Web服务端口")
parser.add_argument("--force_vocab_size", type=int, default=None, help="强制设置词汇表大小")
parser.add_argument("--vllm_url", type=str, default="http://localhost:8000", help="vLLM服务URL")
parser.add_argument("--vllm_api_key", type=str, required=True, help="vLLM API密钥（必须与服务端一致）")
parser.add_argument("--workers", type=int, default=0, help="Uvicorn工作进程数，0表示自动设置")
args = parser.parse_args()

# Create the FastAPI application.
app = FastAPI(
    title="法律顾问AI API",
    description="基于大语言模型的法律顾问API，提供法律问题的回答和建议",
    version="1.0.0",
    lifespan=lifespan
)

# CORS middleware.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers for credentialed requests and is overly permissive —
# consider an explicit origin whitelist.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# GZip-compress responses larger than 500 bytes.
app.add_middleware(GZipMiddleware, minimum_size=500)

# Jinja2 template directory (populated at import time by create_frontend_files()).
templates = Jinja2Templates(directory="templates")

# Frontend static-file generation
def create_frontend_files():
    """Write the chat UI (HTML template, CSS, JS) to templates/ and static/.

    Runs at import time so the files exist before /static is mounted.
    Existing files are overwritten on every start. No return value.
    """
    # Create the output directories (idempotent).
    os.makedirs("templates", exist_ok=True)
    os.makedirs("static", exist_ok=True)
    os.makedirs("static/css", exist_ok=True)
    os.makedirs("static/js", exist_ok=True)
    
    # HTML template for the chat page (rendered by get_chat_page via Jinja2).
    html_template = """
    <!DOCTYPE html>
    <html lang="zh-CN">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>{{ title }}</title>
        <link rel="stylesheet" href="/static/css/style.css">
    </head>
    <body>
        <div class="chat-container">
            <div class="chat-header">
                <h1>法律顾问AI</h1>
            </div>
            <div class="chat-messages" id="chatMessages">
                <div class="message system">
                    <div class="message-content">
                        您好，我是法律顾问AI。请提出您的法律问题，我将尽力提供专业的法律建议。
                    </div>
                </div>
            </div>
            <div class="chat-input">
                <textarea id="userInput" placeholder="请输入您的法律问题..." rows="3"></textarea>
                <button id="sendBtn">发送</button>
            </div>
        </div>
        <script src="/static/js/chat.js"></script>
    </body>
    </html>
    """
    
    # Stylesheet for the chat UI.
    css_style = """
    * {
        margin: 0;
        padding: 0;
        box-sizing: border-box;
        font-family: "PingFang SC", "Microsoft YaHei", sans-serif;
    }
    
    body {
        background-color: #f5f5f5;
        display: flex;
        justify-content: center;
        align-items: center;
        min-height: 100vh;
        padding: 20px;
    }
    
    .chat-container {
        width: 100%;
        max-width: 800px;
        background-color: #fff;
        border-radius: 12px;
        box-shadow: 0 4px 20px rgba(0, 0, 0, 0.1);
        overflow: hidden;
        display: flex;
        flex-direction: column;
        height: 80vh;
    }
    
    .chat-header {
        padding: 16px 20px;
        background-color: #4285f4;
        color: white;
        text-align: center;
    }
    
    .chat-messages {
        flex: 1;
        overflow-y: auto;
        padding: 20px;
        display: flex;
        flex-direction: column;
        gap: 16px;
    }
    
    .message {
        max-width: 80%;
        padding: 12px 16px;
        border-radius: 16px;
        line-height: 1.5;
    }
    
    .message.system {
        align-self: center;
        background-color: #f0f0f0;
        border-radius: 8px;
        padding: 8px 16px;
        color: #666;
        max-width: 90%;
        margin-bottom: 16px;
    }
    
    .message.user {
        align-self: flex-end;
        background-color: #e3f2fd;
        border-radius: 16px 4px 16px 16px;
    }
    
    .message.assistant {
        align-self: flex-start;
        background-color: #f5f5f5;
        border-radius: 4px 16px 16px 16px;
    }
    
    .chat-input {
        padding: 16px;
        border-top: 1px solid #eee;
        display: flex;
        gap: 10px;
    }
    
    .chat-input textarea {
        flex: 1;
        padding: 12px;
        border: 1px solid #ddd;
        border-radius: 8px;
        resize: none;
        outline: none;
        font-size: 14px;
    }
    
    .chat-input button {
        padding: 0 24px;
        background-color: #4285f4;
        color: white;
        border: none;
        border-radius: 8px;
        cursor: pointer;
        font-size: 14px;
        transition: background-color 0.2s;
    }
    
    .chat-input button:hover {
        background-color: #3367d6;
    }
    
    .typing {
        display: flex;
        align-items: center;
        gap: 5px;
        padding: 5px 10px;
    }
    
    .typing-dot {
        width: 8px;
        height: 8px;
        background-color: #aaa;
        border-radius: 50%;
        animation: typing-dot 1.4s infinite ease-in-out both;
    }
    
    .typing-dot:nth-child(1) { animation-delay: -0.32s; }
    .typing-dot:nth-child(2) { animation-delay: -0.16s; }
    
    @keyframes typing-dot {
        0%, 80%, 100% { transform: scale(0); }
        40% { transform: scale(1); }
    }
    """
    
    # Client-side JS for the chat page.
    # FIX: this must be a RAW string. In the original (non-raw) literal, Python
    # turned the '\n' in chunk.split('\n') into a real newline inside a JS
    # string literal, making the generated chat.js a JS syntax error
    # (unterminated string) and breaking the whole frontend.
    # NOTE(review): the script hard-codes the X-API-Key value; it must stay in
    # sync with the API_KEY constant defined later in this module, and the key
    # is visible to any client that loads the page.
    js_script = r"""
    document.addEventListener('DOMContentLoaded', () => {
        const messagesContainer = document.getElementById('chatMessages');
        const userInput = document.getElementById('userInput');
        const sendBtn = document.getElementById('sendBtn');
        
        // 存储对话历史
        let chatHistory = [];
        
        // 处理发送消息
        function sendMessage() {
            const message = userInput.value.trim();
            if (!message) return;
            
            // 添加用户消息到界面
            addMessage('user', message);
            
            // 清空输入框
            userInput.value = '';
            
            // 准备请求数据
            const messages = [
                { role: "system", content: "你是一位专业的法律顾问，精通各类法律法规。请提供准确、客观的法律建议，并在必要时引用相关法条。请用中文回复。" }
            ];
            
            // 添加历史消息
            chatHistory.forEach(item => {
                messages.push({ role: item.role, content: item.content });
            });
            
            // 添加当前用户消息
            messages.push({ role: "user", content: message });
            
            // 保存到历史
            chatHistory.push({ role: "user", content: message });
            
            // 显示加载状态
            const typingIndicator = document.createElement('div');
            typingIndicator.className = 'message assistant typing';
            typingIndicator.innerHTML = '<div class="typing-dot"></div><div class="typing-dot"></div><div class="typing-dot"></div>';
            messagesContainer.appendChild(typingIndicator);
            messagesContainer.scrollTop = messagesContainer.scrollHeight;
            
            // 使用fetch发送请求，添加API密钥
            fetch('/api/chat/stream', {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    'X-API-Key': 'sk123582123'  // 添加API密钥
                },
                body: JSON.stringify({
                    messages: messages.map(m => ({ role: m.role, content: m.content }))
                })
            })
            .then(response => {
                const reader = response.body.getReader();
                const decoder = new TextDecoder();
                
                let assistantMessage = '';
                const messageElement = document.createElement('div');
                messageElement.className = 'message assistant';
                
                // 移除加载指示器
                if (typingIndicator.parentNode) {
                    messagesContainer.removeChild(typingIndicator);
                }
                
                function readStream() {
                    return reader.read().then(({ done, value }) => {
                        if (done) {
                            // 流结束，保存到历史
                            if (assistantMessage) {
                                chatHistory.push({ role: "assistant", content: assistantMessage });
                            }
                            return;
                        }
                        
                        // 处理接收到的数据
                        const chunk = decoder.decode(value, { stream: true });
                        const lines = chunk.split('\n');
                        
                        for (const line of lines) {
                            if (line.startsWith('data:')) {
                                try {
                                    const data = JSON.parse(line.slice(5).trim());
                                    assistantMessage = data.text;
                                    
                                    // 更新消息内容
                                    if (!messageElement.parentNode) {
                                        messageElement.innerHTML = `<div class="message-content">${assistantMessage}</div>`;
                                        messagesContainer.appendChild(messageElement);
                                    } else {
                                        messageElement.querySelector('.message-content').innerHTML = assistantMessage;
                                    }
                                    
                                    // 滚动到底部
                                    messagesContainer.scrollTop = messagesContainer.scrollHeight;
                                } catch (error) {
                                    console.error('解析数据错误:', error);
                                }
                            }
                        }
                        
                        // 继续读取流
                        return readStream();
                    });
                }
                
                readStream().catch(error => {
                    console.error('读取流错误:', error);
                    // 如果没有收到任何消息，显示错误
                    if (!assistantMessage) {
                        addMessage('assistant', '抱歉，服务器响应出错，请稍后再试。');
                    }
                });
            })
            .catch(error => {
                console.error('请求错误:', error);
                // 移除加载指示器
                if (typingIndicator.parentNode) {
                    messagesContainer.removeChild(typingIndicator);
                }
                addMessage('assistant', '抱歉，服务器响应出错，请稍后再试。');
            });
        }
        
        // 添加消息到界面
        function addMessage(role, content) {
            const messageElement = document.createElement('div');
            messageElement.className = `message ${role}`;
            messageElement.innerHTML = `<div class="message-content">${content}</div>`;
            messagesContainer.appendChild(messageElement);
            messagesContainer.scrollTop = messagesContainer.scrollHeight;
        }
        
        // 事件监听
        sendBtn.addEventListener('click', sendMessage);
        userInput.addEventListener('keydown', (e) => {
            if (e.key === 'Enter' && !e.shiftKey) {
                e.preventDefault();
                sendMessage();
            }
        });
    });
    """
    
    # Write the three generated files to disk.
    with open("templates/chat.html", "w", encoding="utf-8") as f:
        f.write(html_template)
    
    with open("static/css/style.css", "w", encoding="utf-8") as f:
        f.write(css_style)
    
    with open("static/js/chat.js", "w", encoding="utf-8") as f:
        f.write(js_script)

# Generate the frontend files at import time so the static directory exists
# before it is mounted below.
create_frontend_files()

app.mount("/static", StaticFiles(directory="static"), name="static")

# Request/response models
class ChatMessage(BaseModel):
    """One chat turn in OpenAI message format."""
    role: str  # "system", "user" or "assistant" (as used by the frontend JS)
    content: str

class ChatRequest(BaseModel):
    """Payload for /api/chat and /api/chat/stream.

    NOTE(review): the sampling fields below are accepted but
    generate_response() currently hard-codes its own values and ignores them.
    """
    messages: List[ChatMessage]  # full conversation; the last entry is the live prompt
    stream: bool = True
    temperature: float = 0.2
    max_tokens: int = 512  # reduced from 1024 to 512 to speed up responses
    top_p: float = 0.85
    top_k: int = 40
    repetition_penalty: float = 1.15
    presence_penalty: float = 0.2
    frequency_penalty: float = 0.2

class ChatResponse(BaseModel):
    """Non-streaming reply wrapper returned by /api/chat."""
    response: str
    
# Simple in-memory store for chat histories (appears unused in this file).
chat_histories = {}

# Simple in-memory response cache, shared by all requests in this process.
response_cache = {}
CACHE_MAX_SIZE = 5000  # max cached entries (raised to 5000)
CACHE_TTL = 7200  # entry lifetime in seconds (raised to 2 hours)

def get_cache_key(message, history):
    """Build a deterministic cache key for a (message, history) pair.

    Only the first 50 characters of each past user message participate,
    so near-identical conversations share a cache slot (same granularity
    as the original implementation).

    Args:
        message: Current user message string.
        history: Iterable of (user_msg, assistant_msg) pairs.

    Returns:
        A sha256 hex digest string. Unlike the previous built-in hash(),
        this is stable across processes (hash() of str is randomized per
        interpreter via PYTHONHASHSEED, so with multiple uvicorn workers
        the same conversation hashed differently in each worker); it also
        avoids quadratic string concatenation in the loop.
    """
    import hashlib  # local import keeps the module's top-level imports untouched

    parts = [message]
    # assistant replies were never part of the key in the original; keep that.
    parts.extend(user_msg[:50] for user_msg, _assistant_msg in history)
    key = "|".join(parts)
    return hashlib.sha256(key.encode("utf-8")).hexdigest()

def get_cached_response(cache_key):
    """Return the cached reply for *cache_key*, or None when absent or expired."""
    entry = response_cache.get(cache_key)
    if entry is None:
        return None
    if time.time() - entry['timestamp'] < CACHE_TTL:
        return entry['response']
    # Expired: drop the stale entry before reporting a miss.
    del response_cache[cache_key]
    return None

def cache_response(cache_key, response):
    """Store *response* under *cache_key*, evicting the oldest entry when full."""
    if len(response_cache) >= CACHE_MAX_SIZE:
        # Evict the entry with the smallest timestamp, i.e. the oldest insert.
        stalest_key = min(response_cache, key=lambda k: response_cache[k]['timestamp'])
        del response_cache[stalest_key]

    response_cache[cache_key] = {
        'response': response,
        'timestamp': time.time(),
    }

# Core generation function
async def generate_response(message: str, history: List[List[str]]):
    """Stream an assistant reply for *message* from the vLLM backend.

    Yields the *cumulative* reply text after each received token delta, then
    yields the cleaned-up full reply once more at the end. On any failure a
    single apologetic error string is yielded instead of raising, so callers
    can forward it straight to the client.

    Args:
        message: Current user message.
        history: Past turns as [user_msg, assistant_msg] pairs.
    """
    # Serve straight from the in-memory cache when we have a fresh entry.
    cache_key = get_cache_key(message, history)
    cached_response = get_cached_response(cache_key)
    if cached_response:
        yield cached_response
        return

    # Build the conversation in OpenAI chat format (Qwen template applied server-side).
    messages = []

    # Short system prompt to keep the token budget down.
    messages.append({"role": "system", "content": "你是专业法律顾问。提供准确客观的法律建议，必要时引用法条。直接回答问题，不要重复用户问题。"})

    # Include only the last 3 turns of history to bound context length.
    for user_msg, assistant_msg in history[-3:]:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})

    # Current user message goes last.
    messages.append({"role": "user", "content": message})

    try:
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {args.vllm_api_key}"
        }

        request_data = {
            "messages": messages,
            "temperature": 0.2,
            "max_tokens": 512,  # small budget to keep latency low
            "top_p": 0.85,
            "top_k": 40,
            "repetition_penalty": 1.15,
            "stream": True
        }

        # FIX: reuse the pooled session created in lifespan() instead of paying
        # TCP/session setup on every request (the pool exists for exactly this).
        # Fall back to a one-off session when running outside the app lifecycle.
        session = aiohttp_session
        owns_session = session is None
        if owns_session:
            session = aiohttp.ClientSession()
        try:
            async with session.post(
                f"{args.vllm_url}/v1/chat/completions",
                headers=headers,
                json=request_data,
                timeout=aiohttp.ClientTimeout(total=30)  # explicit total timeout
            ) as response:
                if response.status != 200:
                    if response.status == 401:
                        raise Exception(f"认证失败 (401)，请检查API密钥是否正确")
                    else:
                        raise Exception(f"API错误: {response.status} - {await response.text()}")

                # Cumulative reply text built from streamed deltas.
                collected_message = ""

                # The backend emits SSE lines: "data: {json}" / "data: [DONE]".
                async for raw_line in response.content:
                    # FIX: strip() removes the trailing newline so the "[DONE]"
                    # sentinel actually matches; previously the trailing "\n"
                    # made every stream end with a spurious JSON parse error.
                    line = raw_line.decode('utf-8').strip()
                    if not line:
                        continue

                    if line.startswith("data: "):
                        line = line[6:]  # drop the SSE "data: " prefix

                    if line == "[DONE]":
                        break

                    try:
                        chunk = json.loads(line)
                        if "choices" in chunk and len(chunk["choices"]) > 0:
                            delta = chunk["choices"][0].get("delta", {})
                            if "content" in delta and delta["content"]:
                                collected_message += delta["content"]
                                yield collected_message
                    except json.JSONDecodeError:
                        print(f"无法解析JSON: {line}")

                # Trim the model's end-of-turn marker, if it leaked through.
                if "<|im_end|>" in collected_message:
                    collected_message = collected_message.split("<|im_end|>")[0].strip()

                # Drop an echoed "assistant"/"Assistant" role prefix, if present.
                if collected_message.startswith("assistant") or collected_message.startswith("Assistant"):
                    collected_message = collected_message.split("\n", 1)[-1] if "\n" in collected_message else collected_message[len("assistant"):].lstrip()

                # Cache the final, cleaned-up reply.
                cache_response(cache_key, collected_message)

                # Emit the complete message one last time.
                yield collected_message
        finally:
            # Only close sessions we created ourselves; the pooled one is shared.
            if owns_session:
                await session.close()

    except Exception as e:
        error_msg = f"生成回复时出错: {e}"
        print(error_msg)
        yield f"抱歉，生成回复时出现错误: {str(e)[:100]}..."

# FastAPI routes
@app.get("/", response_class=HTMLResponse)
async def get_chat_page(request: Request):
    """Render and return the chat UI page."""
    context = {"request": request, "title": "法律顾问AI"}
    return templates.TemplateResponse("chat.html", context)

@app.post("/api/chat")
async def chat_endpoint(request: ChatRequest):
    """非流式聊天API端点"""
    async with request_semaphore:  # 限流
        history = []  # 此处可以从数据库或会话中获取历史记录
        
        # 从请求中提取最后一条用户消息
        user_message = request.messages[-1].content
        
        # 转换历史格式以匹配generate_response函数
        formatted_history = []
        for i in range(0, len(request.messages) - 1, 2):
            if i+1 < len(request.messages):
                formatted_history.append([
                    request.messages[i].content,
                    request.messages[i+1].content
                ])
        
        # 生成响应（消耗迭代器获取最终结果）
        response_generator = generate_response(user_message, formatted_history)
        final_response = ""
        async for partial in response_generator:
            final_response = partial
        
        return ChatResponse(response=final_response)

# API-key header definition.
# NOTE(review): hard-coded secret, also embedded in the generated frontend JS;
# replace with a secure random value loaded from env/config.
API_KEY = "sk123582123"  # change this to a secure random string
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)

# Dependency that validates the X-API-Key request header.
async def get_api_key(api_key: str = Security(api_key_header)):
    """Return the presented API key, or raise 401 when it does not match."""
    if api_key == API_KEY:
        return api_key
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid API Key"
    )

# Streaming endpoint, protected by the API-key dependency.
@app.post("/api/chat/stream")
async def stream_chat_endpoint(
    request: ChatRequest,
    api_key: str = Security(get_api_key)  # enforce X-API-Key auth
):
    """Streaming chat API endpoint (Server-Sent Events).

    Each SSE event's data payload is a JSON object {"text": ...} carrying the
    cumulative reply so far; the final event carries the complete reply.
    """
    async with request_semaphore:  # throttle concurrent requests
        # The last message is the live user prompt; everything before it is
        # history. (Removed the unused `history` local from the original.)
        prior = request.messages[:-1]
        user_message = request.messages[-1].content

        # Convert to [user, assistant] pairs for generate_response().
        formatted_history = []
        for i in range(0, len(prior), 2):
            if i + 1 < len(prior):
                formatted_history.append([
                    prior[i].content,
                    prior[i + 1].content
                ])

        async def event_generator():
            # Forward every cumulative partial as one SSE event.
            async for partial_response in generate_response(user_message, formatted_history):
                yield {
                    "data": json.dumps({"text": partial_response})
                }

        return EventSourceResponse(event_generator())

# GET variant of the streaming endpoint.
@app.get("/api/chat/stream")
async def stream_chat_endpoint_get(
    messages: str = None,
    # FIX (security): the GET route previously had no Security dependency at
    # all, and calling stream_chat_endpoint() directly bypasses FastAPI's
    # dependency injection — so this route served responses with NO API-key
    # check. Enforce the same auth as the POST route here.
    api_key: str = Security(get_api_key),
):
    """Streaming chat endpoint for GET requests.

    Expects ?messages=<JSON array of {role, content}> and delegates to the
    POST handler after parsing.

    Raises:
        HTTPException 400: missing or malformed `messages` parameter.
        HTTPException 401: invalid API key (via the Security dependency).
    """
    # NOTE(review): the delegated POST handler acquires request_semaphore a
    # second time; under full saturation this double-acquire could stall —
    # pre-existing behavior, flagged for follow-up.
    async with request_semaphore:  # throttle concurrent requests
        if not messages:
            raise HTTPException(status_code=400, detail="Messages parameter is required")

        # Narrowed try: only payload parsing maps to 400. Previously errors
        # raised inside the downstream handler were also rewrapped as 400.
        try:
            parsed_messages = json.loads(messages)
            request = ChatRequest(
                messages=[ChatMessage(role=m["role"], content=m["content"])
                        for m in parsed_messages]
            )
        except Exception as e:
            raise HTTPException(status_code=400, detail=f"Invalid request: {str(e)}")

        # Delegate to the POST handler.
        return await stream_chat_endpoint(request)

# Health-check endpoint.
@app.get("/health")
async def health_check():
    """Report whether this service can reach the vLLM backend.

    Returns {"status": "healthy"|"degraded", "vllm_service": ...}; never raises.
    """
    try:
        headers = {"Authorization": f"Bearer {args.vllm_api_key}"}
        # FIX: requests.get is synchronous; calling it directly in an async
        # endpoint blocked the event loop for up to the 5 s timeout, stalling
        # every in-flight request. Offload it to the default thread pool.
        loop = asyncio.get_running_loop()
        response = await loop.run_in_executor(
            None,
            functools.partial(
                requests.get, f"{args.vllm_url}/v1/models", headers=headers, timeout=5
            ),
        )
        if response.status_code == 200:
            return {"status": "healthy", "vllm_service": "connected"}
        return {"status": "degraded", "vllm_service": f"error: {response.status_code}"}
    except Exception as e:
        return {"status": "degraded", "vllm_service": f"error: {str(e)}"}

def main():
    """Validate configuration, probe the vLLM backend, then start uvicorn."""
    # 1. Resolve the base model path from the adapter config (or CLI override).
    print(f"加载适配器配置: {args.adapter_path}")

    # Refuse to start with a missing or placeholder API key.
    if not args.vllm_api_key or args.vllm_api_key == "your-api-key":
        print("错误: 必须提供有效的API密钥")
        return

    print(f"使用API密钥: {args.vllm_api_key[:5]}...{args.vllm_api_key[-5:] if len(args.vllm_api_key) > 10 else ''}")

    try:
        from peft import PeftConfig
        peft_config = PeftConfig.from_pretrained(args.adapter_path)
        base_model_path = args.base_model if args.base_model else peft_config.base_model_name_or_path
    except Exception as e:
        print(f"读取适配器配置失败: {e}")
        if args.base_model:
            base_model_path = args.base_model
            print("使用命令行指定的基础模型路径")
        else:
            print("无法确定基础模型路径，请使用--base_model参数指定")
            return

    print(f"使用基础模型: {base_model_path}")

    # 2. Make sure the vLLM backend is reachable before serving traffic.
    print(f"检查vLLM服务可用性: {args.vllm_url}")
    try:
        headers = {}
        if args.vllm_api_key:
            headers["Authorization"] = f"Bearer {args.vllm_api_key}"

        response = requests.get(f"{args.vllm_url}/v1/models", headers=headers)
        if response.status_code == 200:
            models = response.json()
            print(f"vLLM服务可用，加载的模型: {models}")
        else:
            print(f"vLLM服务返回错误: {response.status_code} - {response.text}")
    except Exception as e:
        print(f"连接vLLM服务失败: {e}")
        print("请确保vLLM服务已启动，并检查URL是否正确")
        return

    # 3. Pick the worker count: bounded auto-detect when not specified.
    if args.workers <= 0:
        workers = min(os.cpu_count() or 1, 16)  # cap auto mode at 16 workers
    else:
        workers = args.workers

    print(f"启动FastAPI服务，端口: {args.port}，工作进程: {workers}")

    # 4. Run uvicorn with an import string (required when workers > 1).
    import sys
    current_file = os.path.abspath(__file__)
    sys.path.insert(0, os.path.dirname(os.path.dirname(current_file)))

    # Derive the dotted module path relative to the sys.path entry just added.
    # FIX: use os.path.splitext to trim only the trailing extension; the old
    # str.replace('.py', '') corrupted any ".py" occurring mid-path
    # (e.g. a parent directory named "my.pyproj").
    module_path = os.path.relpath(current_file, os.path.dirname(os.path.dirname(current_file)))
    module_path = os.path.splitext(module_path)[0].replace(os.path.sep, '.')

    uvicorn.run(
        f"{module_path}:app",  # e.g. "SFT_web.Fastapi_vllm:app"
        host="0.0.0.0", 
        port=args.port,
        workers=workers,
        log_level="info",
        timeout_keep_alive=30
    )

# Script entry point.
if __name__ == "__main__":
    main()
