# -*- coding: utf-8 -*-
import asyncio
import json
import logging
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from functools import wraps
from pathlib import Path
from typing import Optional, Dict, Any

import requests
import torch
import uvicorn
from fastapi import FastAPI, HTTPException, Request
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings, Document
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama
from llama_index.readers.file import PDFReader
from pydantic import BaseModel

from .structuredDataExtraction import initialize_components, build_vector_index
from .config_llamaindex import (
    DATA_DIR, STORAGE_DIR, OLLAMA_CONFIG,
    EMBEDDING_CONFIG, TEXT_SPLITTER_CONFIG,
    QUERY_ENGINE_CONFIG
)

# Logging configuration: mirror every record both to a UTF-8 log file
# and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('api_requests.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Thread pool used to run the blocking LlamaIndex query off the event loop.
executor = ThreadPoolExecutor(max_workers=4)

app = FastAPI(title="LlamaIndex API", description="基于LlamaIndex的文档问答API")

# Globals populated by the startup hook: the vector index and its query engine.
# They stay None until initialization succeeds; /query answers 503 meanwhile.
index = None
query_engine = None
# Bookkeeping of in-flight queries keyed by request id, exposed via /status.
active_requests: Dict[str, Any] = {}

class QueryRequest(BaseModel):
    """Request payload accepted by the /query endpoint."""
    question: str
    top_k: Optional[int] = None  # optional retrieval top-k override; None keeps engine default
    timeout: Optional[int] = 300  # per-request timeout in seconds (default: 5 minutes)

class QueryResponse(BaseModel):
    """Response payload returned by the /query endpoint."""
    request_id: str
    answer: str
    sources: list[dict]  # one entry per retrieved chunk: file, score (percent), content
    processing_time: float  # wall-clock seconds spent answering

def retry_on_timeout(max_retries=3, delay=5):
    """Decorator factory that retries an async callable on asyncio.TimeoutError.

    Args:
        max_retries: total number of attempts before the timeout propagates.
        delay: seconds to sleep between attempts.

    Any exception other than ``asyncio.TimeoutError`` propagates immediately
    without a retry.  (The previous ``except Exception: raise`` clause was a
    no-op and the trailing ``return None`` was unreachable; both removed.)
    """
    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            for attempt in range(max_retries):
                try:
                    return await func(*args, **kwargs)
                except asyncio.TimeoutError:
                    # Last attempt: let the timeout propagate to the caller.
                    if attempt == max_retries - 1:
                        raise
                    logger.warning(f"操作超时，{delay}秒后重试... (尝试 {attempt + 1}/{max_retries})")
                    await asyncio.sleep(delay)
        return wrapper
    return decorator

def check_ollama_service():
    """Return True if the Ollama HTTP API answers on its /api/tags endpoint.

    The 5-second timeout keeps the startup polling loop responsive; any
    connection or HTTP-layer failure is reported as "not available".
    """
    try:
        response = requests.get(f"{OLLAMA_CONFIG['base_url']}/api/tags", timeout=5)
        return response.status_code == 200
    except requests.RequestException:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed while waiting for the service to come up.
        return False

def wait_for_ollama(max_wait=60):
    """Block until the Ollama service responds, polling once per second.

    Returns True as soon as the service answers, False after max_wait
    unsuccessful probes.
    """
    logger.info("等待Ollama服务启动...")
    attempt = 0
    while attempt < max_wait:
        if check_ollama_service():
            logger.info("Ollama服务已就绪")
            return True
        attempt += 1
        logger.info(f"等待中... ({attempt}/{max_wait})")
        time.sleep(1)
    return False

def load_documents():
    """Load all PDF, .txt and .md files under DATA_DIR into llama_index documents.

    Returns:
        list: llama_index ``Document`` objects (one or more per source file).

    Raises:
        Exception: if nothing at all could be loaded. Individual file
        failures are logged and skipped.
    """
    try:
        pdf_reader = PDFReader()
        documents = []

        # PDF files: delegate parsing to the llama_index PDF reader.
        pdf_files = list(DATA_DIR.glob("**/*.pdf"))
        if pdf_files:
            logger.info(f"找到 {len(pdf_files)} 个PDF文件")
            for pdf_file in pdf_files:
                try:
                    pdf_docs = pdf_reader.load_data(pdf_file)
                    documents.extend(pdf_docs)
                    logger.info(f"成功加载PDF文件: {pdf_file}")
                except Exception as e:
                    logger.error(f"加载PDF文件失败 {pdf_file}: {str(e)}")

        # Plain-text files: wrap the content in Document objects so the list
        # is homogeneous with the PDF entries. (Previously raw dicts were
        # appended, which are not valid alongside Document objects when the
        # index is built from this list.)
        text_files = list(DATA_DIR.glob("**/*.txt")) + list(DATA_DIR.glob("**/*.md"))
        if text_files:
            logger.info(f"找到 {len(text_files)} 个文本文件")
            for text_file in text_files:
                try:
                    content = text_file.read_text(encoding='utf-8')
                    documents.append(Document(text=content, metadata={"file_path": str(text_file)}))
                    logger.info(f"成功加载文本文件: {text_file}")
                except Exception as e:
                    logger.error(f"加载文本文件失败 {text_file}: {str(e)}")

        if not documents:
            raise Exception("没有找到任何可处理的文档")

        return documents
    except Exception as e:
        logger.error(f"加载文档时出错: {str(e)}")
        raise

@app.on_event("startup")
async def startup_event():
    """Initialize the embedding model, LLM, index and query engine on startup.

    Populates the module-level ``index`` and ``query_engine`` globals; the
    /query endpoint answers 503 until this completes successfully.
    NOTE(review): ``on_event`` is deprecated in current FastAPI in favor of
    lifespan handlers — consider migrating.
    """
    global index, query_engine

    try:
        # Block until the local Ollama server accepts connections.
        if not wait_for_ollama():
            raise Exception("Ollama服务启动超时，请确保Ollama服务正在运行")

        # Use the GPU for embeddings when available.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        logger.info(f"使用设备: {device}")

        # Embedding model (HuggingFace, parameters from EMBEDDING_CONFIG).
        embed_model = HuggingFaceEmbedding(
            model_name=EMBEDDING_CONFIG["model_name"],
            device=device,
            embed_batch_size=EMBEDDING_CONFIG["embed_batch_size"]
        )

        # LLM served by Ollama.
        llm = Ollama(
            model=OLLAMA_CONFIG["model"],
            temperature=OLLAMA_CONFIG["temperature"],
            request_timeout=OLLAMA_CONFIG["request_timeout"],
            base_url=OLLAMA_CONFIG["base_url"]
        )

        # Sentence-based chunking used when indexing documents.
        text_splitter = SentenceSplitter(**TEXT_SPLITTER_CONFIG)

        # Register the components as llama_index global defaults.
        Settings.embed_model = embed_model
        Settings.llm = llm
        Settings.text_splitter = text_splitter

        # Load source documents from DATA_DIR.
        logger.info("正在加载文档...")
        if not DATA_DIR.exists():
            raise Exception(f"数据目录 {DATA_DIR} 不存在")

        documents = load_documents()

        # Build (or load) the vector index, persisted under STORAGE_DIR.
        logger.info("正在构建索引...")
        index = build_vector_index(documents, embed_model, text_splitter, str(STORAGE_DIR))

        # Query engine consumed by the /query endpoint.
        query_engine = index.as_query_engine(**QUERY_ENGINE_CONFIG)
        logger.info("API服务已就绪")

    except Exception as e:
        logger.error(f"初始化失败: {str(e)}")
        raise

@app.middleware("http")
async def log_requests(request: Request, call_next):
    """Log every request's start, body (for POSTs), outcome and duration."""
    request_id = f"{int(time.time())}_{id(request)}"
    start_time = time.time()

    # Capture the POST body for the log line.
    # NOTE(review): reading the body inside BaseHTTPMiddleware can consume
    # the stream before the route handler sees it on some Starlette
    # versions — confirm downstream handlers still receive POST bodies.
    body = None
    if request.method == "POST":
        try:
            body = await request.json()
        except:
            # Not JSON (or unparseable) — fall back to the raw bytes.
            body = await request.body()

    logger.info(f"Request {request_id} started - Method: {request.method}, URL: {request.url}, Body: {body}")

    try:
        response = await call_next(request)

        # Request finished: log elapsed time and status code.
        process_time = time.time() - start_time
        logger.info(f"Request {request_id} completed - Time: {process_time:.2f}s, Status: {response.status_code}")

        return response
    except Exception as e:
        # Request failed: log and propagate.
        logger.error(f"Request {request_id} failed - Error: {str(e)}")
        raise
    finally:
        # NOTE(review): process_query keys active_requests with a
        # "query_"-prefixed id, so this id never matches an entry;
        # this cleanup appears to be dead code — verify intent.
        if request_id in active_requests:
            del active_requests[request_id]

@retry_on_timeout(max_retries=3, delay=5)
async def process_query(request_id: str, query: str, top_k: Optional[int] = None, timeout: int = 300):
    """Run one RAG query against the global query engine.

    Args:
        request_id: caller-supplied identifier used for logging/bookkeeping.
        query: the user's question (wrapped in a Chinese answer prompt).
        top_k: optional retrieval top-k override.
        timeout: timeout in seconds per attempt (retried by the decorator).

    Returns:
        QueryResponse with the answer, retrieval sources and elapsed time.

    Raises:
        HTTPException: 503 if the engine has not finished initializing.
        asyncio.TimeoutError: if every retry attempt times out.
    """
    if not query_engine:
        raise HTTPException(status_code=503, detail="服务未就绪")

    start_time = time.time()
    active_requests[request_id] = {
        "start_time": start_time,
        "query": query,
        "status": "processing"
    }

    try:
        # NOTE(review): this mutates the shared global engine, so concurrent
        # requests with different top_k values can race each other — confirm
        # whether a per-request engine/retriever should be built instead.
        if top_k is not None:
            query_engine.similarity_top_k = top_k

        # Prompt wrapper asking for a concise, professional answer in Chinese.
        formatted_query = f"""请用中文回答以下问题。回答要简洁、专业、直接切入重点：

{query}

请用中文回答。"""

        # Run the blocking query in the thread pool with a timeout.
        # get_running_loop() replaces the deprecated get_event_loop()
        # inside coroutines.
        loop = asyncio.get_running_loop()
        response = await asyncio.wait_for(
            loop.run_in_executor(executor, lambda: query_engine.query(formatted_query)),
            timeout=timeout
        )

        # Strip chain-of-thought output if the model emits <think> blocks,
        # keeping only the final answer.
        answer = str(response)
        if "<think>" in answer and "</think>" in answer:
            answer = answer.split("</think>")[-1].strip()

        # Collect retrieval provenance for the response payload.
        sources = []
        for node in response.source_nodes:
            # Prefer metadata on the scored node; fall back to the wrapped node.
            file_path = node.metadata.get("file_path")
            if not file_path:
                file_path = getattr(node.node, "metadata", {}).get("file_path", "未知")
            # Similarity score rescaled to a 0-100 percentage.
            percent_score = round(float(node.score) * 100, 2) if node.score is not None else None
            # Full chunk text backing this source.
            if hasattr(node.node, "get_content"):
                full_content = node.node.get_content()
            else:
                full_content = node.text
            sources.append({
                "file": file_path,
                "score": percent_score,
                "content": full_content
            })

        processing_time = time.time() - start_time

        logger.info(f"Query {request_id} completed successfully in {processing_time:.2f}s")

        return QueryResponse(
            request_id=request_id,
            answer=answer,
            sources=sources,
            processing_time=processing_time
        )

    except asyncio.TimeoutError:
        logger.error(f"Query {request_id} timed out after {timeout}s")
        raise
    except Exception as e:
        logger.error(f"Query {request_id} failed: {str(e)}")
        raise
    finally:
        # Mark the entry done even on failure so /status stops reporting it
        # as processing.
        if request_id in active_requests:
            active_requests[request_id]["status"] = "completed"

@app.post("/query", response_model=QueryResponse)
async def query(request: QueryRequest):
    """Handle a /query request: run the RAG pipeline and map failures to HTTP errors.

    Returns:
        QueryResponse on success.

    Raises:
        HTTPException: 408 on timeout, 500 on unexpected errors; deliberate
        HTTPExceptions from process_query (e.g. 503) pass through unchanged.
    """
    request_id = f"query_{int(time.time())}_{id(request)}"

    try:
        # process_query enforces request.timeout internally (with retries),
        # so no outer wait_for is applied — a second wrapper with the same
        # timeout would cancel the task before any retry could ever run.
        return await process_query(request_id, request.question, request.top_k, request.timeout)
    except asyncio.TimeoutError:
        # All retry attempts timed out.
        logger.error(f"Request {request_id} timed out after {request.timeout}s")
        raise HTTPException(
            status_code=408,
            detail=f"查询超时（{request.timeout}秒）"
        )
    except HTTPException:
        # Preserve deliberate HTTP errors (e.g. 503 while initializing)
        # instead of collapsing them into a generic 500.
        raise
    except Exception as e:
        logger.error(f"Request {request_id} failed with error: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/status")
async def get_status():
    """Report service readiness and the set of queries currently in flight."""
    service_state = "ready" if query_engine else "initializing"
    return {
        "service_status": service_state,
        "active_requests": len(active_requests),
        "active_requests_details": active_requests,
    }

if __name__ == "__main__":
    # Development entry point: serve the API on all interfaces, port 8009.
    uvicorn.run(app, host="0.0.0.0", port=8009)