from fastapi.security.api_key import APIKeyHeader
from logging import Logger
from contextlib import asynccontextmanager
from fastapi import FastAPI, Depends, HTTPException, Request
from fastapi.security import APIKeyHeader
from fastapi.responses import JSONResponse, StreamingResponse, FileResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware
from pydantic import BaseModel
import uvicorn
import logging
from logging.handlers import RotatingFileHandler
from config import Config
from utils import route_request
from typing import Optional, AsyncGenerator
import asyncio
import os 
import time  
import uuid
from chat_history import chat_history_manager
from models import AnalysisRequest
from starlette.concurrency import run_in_threadpool
import signal
import sys

# Ensure the log directory exists before attaching file handlers
log_dir = "logs"
if not os.path.exists(log_dir):
    os.makedirs(log_dir)

# Configure root logging: console output plus a size-rotated file in logs/app.log
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),  # console output
        RotatingFileHandler(
            os.path.join(log_dir, "app.log"),
            maxBytes=10*1024*1024,  # rotate at 10MB
            backupCount=5  # keep 5 rotated backup files
        )
    ]
)

logger: Logger = logging.getLogger(__name__)
# X-API-Key header scheme; auto_error=False so missing keys reach verify_api_key
# and yield the custom 401 instead of FastAPI's default 403
api_key_header: APIKeyHeader = APIKeyHeader(name="X-API-Key", auto_error=False)

# Global handle to the background temp-file cleanup task (created in lifespan)
cleanup_task = None

# Request-timeout middleware: bounds every request so slow handlers
# cannot hold a worker indefinitely.
class HighPerformanceTimeoutMiddleware(BaseHTTPMiddleware):
    """Wraps each request in ``asyncio.wait_for`` and converts failures
    into JSON error responses (504 on timeout, 500 on other errors)."""

    def __init__(self, app, timeout=60):  # default timeout kept at 60 seconds
        super().__init__(app)
        self.timeout = timeout

    async def dispatch(self, request: Request, call_next):
        try:
            # Bound the downstream handler by the configured timeout
            return await asyncio.wait_for(call_next(request), timeout=self.timeout)
        except asyncio.TimeoutError:
            logger.warning(f"请求处理超时: {request.url.path}")
            return JSONResponse(
                status_code=504,
                content={"detail": "请求处理超时，请稍后重试"},
            )
        except Exception as e:
            # Last-resort guard: surface any unexpected error as a 500
            logger.error(f"请求处理异常: {str(e)}")
            return JSONResponse(
                status_code=500,
                content={"detail": f"请求处理异常: {str(e)}"},
            )

# Concurrency-limiting middleware
class HighPerformanceConcurrencyMiddleware(BaseHTTPMiddleware):
    """Caps the number of in-flight requests; requests beyond the cap are
    rejected immediately with HTTP 429 instead of queueing."""

    def __init__(self, app, max_concurrent=100):
        super().__init__(app)
        self.max_concurrent = max_concurrent  # hard cap on in-flight requests
        self.current_requests = 0  # in-flight counter, guarded by request_counter_lock
        self.semaphore = asyncio.Semaphore(max_concurrent)
        self.request_counter_lock = asyncio.Lock()
        # Log throttling counter: only every 10th request is logged (see dispatch)
        self.log_counter = 0
    
    async def dispatch(self, request: Request, call_next):
        # Fast-path rejection: unlocked read, so it may be slightly stale, but it
        # avoids queueing on the semaphore when the service is already saturated
        if self.current_requests >= self.max_concurrent:
            return JSONResponse(
                status_code=429,
                content={"detail": "服务繁忙，请稍后重试"}
            )
        
        async with self.semaphore:
            async with self.request_counter_lock:
                self.current_requests += 1
                self.log_counter += 1
                # Log only every 10th request, or whenever load exceeds 80% of
                # the cap, to keep logging I/O overhead low
                if self.log_counter % 10 == 0 or self.current_requests > self.max_concurrent * 0.8:
                    logger.info(f"当前并发请求数: {self.current_requests}/{self.max_concurrent}")
            
            try:
                response = await call_next(request)
                return response
            finally:
                # Always decrement, even when the downstream handler raised
                async with self.request_counter_lock:
                    self.current_requests -= 1

# Lifespan event handler (defined before the app so it can be passed to FastAPI)
@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
    """Startup/shutdown hook: start the temp-file cleanup task on startup;
    on shutdown cancel it and close the shared aiohttp session from utils."""
    global cleanup_task
    # --- startup ---
    logger.info("应用启动，开始初始化...")
    cleanup_task = asyncio.create_task(cleanup_temp_files())
    logger.info("临时文件清理任务已启动")
    
    yield  # application serves requests while suspended here
    # --- shutdown ---
    logger.info("应用关闭，正在清理资源...")
    if cleanup_task and not cleanup_task.done():
        cleanup_task.cancel()
        try:
            # Await the cancelled task so it can run its cleanup
            await cleanup_task
        except asyncio.CancelledError:
            logger.info("临时文件清理任务已取消")
    
    # Close the aiohttp session owned by utils.service (deferred import)
    from utils import service
    logger.info("正在关闭aiohttp会话...")
    await service.close_session()
    logger.info("aiohttp会话已关闭")
    
    logger.info("资源清理完成")


# Application initialization (lifespan handler wired in via the lifespan kwarg)
app = FastAPI(
    title="清算服务API",
    version="1.0",
    docs_url="/docs",
    redoc_url="/redoc",
    lifespan=lifespan,
    root_path="/ccAgent"  # path prefix, e.g. when mounted behind a reverse proxy
)

# CORS middleware to allow cross-origin requests
app.add_middleware(
    middleware_class=CORSMiddleware,
    allow_origins=["*"],  # restrict to concrete domains in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Concurrency-limiting middleware (cap of 20 in-flight requests)
app.add_middleware(HighPerformanceConcurrencyMiddleware, max_concurrent=20)

# Request-timeout middleware (60-second budget per request)
app.add_middleware(HighPerformanceTimeoutMiddleware, timeout=60)


# Background task: periodically delete expired temporary files
async def cleanup_temp_files():
    """Run forever, removing files older than 7 days from the temp download
    directory roughly once per hour.

    The sleep lives in ``finally`` so the loop keeps its hourly cadence even
    when a scan raises.  Bug fix: the old code also slept *before* a
    ``continue`` when the directory was missing, which triggered the
    ``finally`` sleep as well and produced a 2-hour wait.
    """
    SEVEN_DAYS = 7 * 24 * 3600  # 604800 seconds
    while True:
        try:
            temp_dir = Config.TEMP_DOWNLOAD_PATH
            if os.path.exists(temp_dir):
                now = time.time()
                for name in os.listdir(temp_dir):
                    file_path = os.path.join(temp_dir, name)
                    # Delete regular files not modified for more than 7 days
                    if os.path.isfile(file_path) and now - os.path.getmtime(file_path) > SEVEN_DAYS:
                        os.remove(file_path)
                        logger.info(f"删除过期临时文件: {file_path}")
        except Exception as e:
            # Best-effort: log and retry on the next cycle
            logger.error(f"清理临时文件错误: {str(e)}")
        finally:
            await asyncio.sleep(3600)  # run once per hour


# API-key validation dependency
async def verify_api_key(api_key: str = Depends(api_key_header)):
    """FastAPI dependency: reject requests whose X-API-Key header is missing
    or does not match the configured key; return the key on success."""
    key_is_valid = bool(api_key) and api_key == Config.API_KEY
    if not key_is_valid:
        raise HTTPException(
            status_code=401,
            detail="无效或缺失的API密钥"
        )
    return api_key


 


# High-performance core endpoint
@app.post("/intent_analysis")
async def intent_analysis(
    req: AnalysisRequest,
    api_key: str = Depends(verify_api_key)
):
    """Intent-analysis endpoint (performance-optimized).

    Routes the user query through ``route_request`` with an inner 30s timeout
    and an outer 60s guard, then normalizes the result into an HTTP response.
    Raises HTTPException 504 on timeout and 500 on unexpected failure.
    """
    start_time = time.time()
    
    try:
        logger.info(f"[步骤0-开始] 接收到请求: {req.user_query[:50]}...")  # truncate query to keep logs short
        
        # Outer wait_for caps the whole call; route_request gets its own inner timeout
        try:
            result = await asyncio.wait_for(
                route_request(
                    user_query=req.user_query,
                    phone_number=req.phone_number or "",  # coerce None to "" (type fix)
                    conversation_id=req.conversation_id,
                    debug=req.debug,
                    include_analysis=req.include_analysis,
                    include_chart=req.include_chart,
                    is_monitor=req.is_monitor,  # forward the monitoring flag
                    timeout=30  # inner timeout for route_request itself
                ), 
                timeout=60  # total budget for this request
            )
        except asyncio.TimeoutError:
            elapsed = time.time() - start_time
            logger.error(f"[步骤5-完成] 请求处理超时: {elapsed:.2f}秒")
            raise HTTPException(status_code=504, detail="请求处理超时，请稍后重试")
        
        end_time = time.time()
        logger.info(f"[步骤5-完成] 请求处理完成，总耗时: {end_time - start_time:.2f}秒")
        
        # Normalize the result into a concrete response type
        if isinstance(result, StreamingResponse):
            return result
        elif isinstance(result, dict) and "status_code" in result and "content" in result:
            return JSONResponse(status_code=result["status_code"], content=result["content"])
        elif isinstance(result, dict):
            return result
        else:
            # Unexpected result type: log it and fail with a generic 500
            logger.error(f"意外的结果类型: {type(result)}")
            return JSONResponse(status_code=500, content={"detail": "内部服务器错误"})
            
    except HTTPException:
        # Re-raise HTTP errors untouched (including the 504 above)
        raise
    except Exception as e:
        end_time = time.time()
        logger.error(f"[步骤5-完成] 请求处理失败: {str(e)}, 耗时: {end_time - start_time:.2f}秒")
        raise HTTPException(status_code=500, detail=f"请求处理异常: {str(e)}")
        
# File download route
@app.get("/download/{file_id}")
async def download_file(file_id: str, filename: str, request: Request):
    """
    Serve a previously generated temporary file.

    :param file_id: unique file ID (prefix of the stored file name)
    :param filename: original file name, used both for the stored name and
        the download's Content-Disposition header
    :param request: FastAPI request object
    :return: FileResponse streaming the file as application/octet-stream
    :raises HTTPException: 404 if the file does not exist
    """
    # Strip any directory components from the user-supplied values so they
    # cannot traverse out of the temp download directory
    safe_id = os.path.basename(file_id)
    safe_name = os.path.basename(filename)

    # Stored files are named "<file_id>_<filename>"; the previous code joined
    # a literal placeholder instead of the filename, so no file ever resolved
    file_path = os.path.join(Config.TEMP_DOWNLOAD_PATH, f"{safe_id}_{safe_name}")

    # 404 when the requested file is missing or already cleaned up
    if not os.path.exists(file_path):
        raise HTTPException(status_code=404, detail="文件未找到")

    # Stream the file back with the original filename for the client
    return FileResponse(file_path, media_type='application/octet-stream', filename=filename)

# Route handling the user's confirmation of cross-month records
@app.post("/confirm_cross_month")
async def confirm_cross_month(
    conversation_id: str,
    user_response: str,
    phone_number: str,
    api_key: str = Depends(verify_api_key)
):
    """Forward the user's cross-month-record confirmation to the settlement
    service and return its result; failures become HTTP 500."""
    try:
        # Imported here rather than at module top (deferred import)
        from sett_service import handle_cross_month_confirmation
        return await handle_cross_month_confirmation(conversation_id, user_response, phone_number)
    except HTTPException:
        # Let deliberate HTTP errors pass through unchanged
        raise
    except Exception as e:
        logger.error(f"[跨月记录确认] 处理确认响应失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"处理确认响应失败: {str(e)}")

# Health-check endpoint
@app.get("/health")
async def health_check():
    """Report service liveness with the current timestamp and version tag."""
    return {
        "status": "healthy",
        "timestamp": time.time(),
        "version": "2.0",
    }

# 用户点赞相关接口
from assess import LikeRequest, submit_like, get_like_statistics, has_user_liked

@app.post("/like/submit")
async def submit_user_like(
    like_request: LikeRequest,
    api_key: str = Depends(verify_api_key)
):
    """
    提交用户点赞
    
    Args:
        like_request: 点赞请求信息
        api_key: API密钥
        
    Returns:
        点赞结果
    """
    try:
        # 直接传递LikeRequest对象给submit_like函数
        result = submit_like(like_request)
        
        return result
        
    except Exception as e:
        logger.error(f"提交用户点赞失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"提交点赞失败: {str(e)}")

@app.get("/like/stats")
async def get_like_stats(api_key: str = Depends(verify_api_key)):
    """
    获取点赞统计信息
    
    Args:
        api_key: API密钥
        
    Returns:
        点赞统计信息
    """
    try:
        stats = get_like_statistics()
        return stats.dict()
        
    except Exception as e:
        logger.error(f"获取点赞统计失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取统计失败: {str(e)}")

@app.get("/like/check/{conversation_id}")
async def check_user_like(
    conversation_id: str,
    api_key: str = Depends(verify_api_key)
):
    """
    检查用户是否已经点赞过某个会话
    
    Args:
        conversation_id: 会话ID
        api_key: API密钥
        
    Returns:
        是否已点赞
    """
    try:
        has_liked = has_user_liked(conversation_id)
        return {
            "conversation_id": conversation_id,
            "has_liked": has_liked
        }
        
    except Exception as e:
        logger.error(f"检查点赞状态失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"检查点赞状态失败: {str(e)}")

@app.get("/like/health")
async def like_health_check():
    """
    点赞系统健康检查
    """
    try:
        stats = get_like_statistics()
        return {
            "status": "healthy",
            "total_likes": stats.total_likes,
            "today_likes": stats.today_likes,
            "last_updated": stats.last_updated
        }
    except Exception as e:
        logger.error(f"点赞系统健康检查失败: {str(e)}")
        return {
            "status": "unhealthy",
            "error": str(e)
        }


# Service startup (high-performance uvicorn configuration)
if __name__ == "__main__":
    import sys
    # Pick the event loop implementation per platform; uvloop is not available on Windows
    loop_type = "auto"
    if sys.platform != "win32":
        try:
            # uvloop is only available on Linux/macOS
            import uvloop  # type: ignore
            loop_type = "uvloop"
        except ImportError:
            loop_type = "auto"
    
    uvicorn.run(
        app, 
        host="0.0.0.0", 
        port=8088, 
        workers=1,  # single process; avoids inter-process communication overhead
        loop=loop_type,  # event loop implementation chosen above
        timeout_keep_alive=15,  # shorter keep-alive timeout
        timeout_graceful_shutdown=10,  # shorter graceful-shutdown window
        limit_concurrency=20,  # concurrency limit of 20 to match the pool size
        limit_max_requests=10000,  # max requests served before worker restart
        backlog=4096,  # larger pending-connection queue
        access_log=False,  # access log disabled for performance
        # tuning for high-concurrency scenarios
        h11_max_incomplete_event_size=65536,
    )