# -*- coding: utf-8 -*-
"""
佛山大学问答系统API服务器
结合微调模型和RAG检索的完整问答服务
"""

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List, Optional, Dict, Any
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
import uvicorn
from pathlib import Path
import json
from datetime import datetime
import asyncio

# RAG组件
from rag_builder import UniversityRAGBuilder

# FastAPI application instance; this metadata is surfaced in the
# auto-generated OpenAPI docs (/docs).
app = FastAPI(
    title="佛山大学智能问答API",
    description="基于Qwen3-4B + LoRA + RAG的智能问答服务",
    version="1.0.0"
)

class QueryRequest(BaseModel):
    """Request body for the /query endpoint."""
    question: str  # user question in natural language
    max_tokens: int = 256  # upper bound on newly generated tokens
    temperature: float = 0.7  # sampling temperature passed to generate()
    top_p: float = 0.9  # nucleus-sampling cutoff passed to generate()
    use_rag: bool = True  # when True, prepend retrieved contexts to the prompt

class QueryResponse(BaseModel):
    """Response body returned by the /query endpoint."""
    answer: str  # generated answer text
    confidence: float = 0.0  # placeholder score; not a calibrated probability
    retrieved_contexts: List[str] = []  # raw RAG passages used for the answer
    processing_time: float = 0.0  # wall-clock seconds for the whole request
    model_info: str = "Qwen3-4B-LoRA"  # identifier of the serving model

class UniversityQAService:
    """Q&A service for Foshan University.

    Wraps a Qwen3-4B causal LM (optionally with a LoRA adapter merged via
    PEFT) plus an optional RAG vector store used to retrieve supporting
    passages that are prepended to the prompt.
    """

    def __init__(
        self,
        model_path: str = "./qwen3-university-lora",
        base_model_path: str = "./models/Qwen3-4B-Instruct-2507",
        rag_vector_path: str = "./university_knowledge"
    ):
        self.model_path = Path(model_path)  # LoRA adapter directory
        self.base_model_path = Path(base_model_path)  # base model weights
        self.rag_vector_path = Path(rag_vector_path)  # persisted vector store

        # Model components; populated by initialize().
        self.model = None
        self.tokenizer = None

        # RAG component; stays None when no vector store directory exists.
        self.rag_builder = None

        # Readiness flag checked by the API endpoints before serving.
        self.is_ready = False

    async def initialize(self):
        """Load tokenizer, base model, optional LoRA adapter and RAG index.

        Raises:
            Exception: re-raises any loading failure after logging it, so the
                server startup fails loudly instead of serving a broken model.
        """
        print("🚀 初始化佛山大学问答服务...")

        try:
            # Tokenizer first: needed both for prompting and for decoding.
            print("加载分词器...")
            self.tokenizer = AutoTokenizer.from_pretrained(
                str(self.base_model_path),
                trust_remote_code=True
            )

            if self.tokenizer.pad_token is None:
                # Some causal LMs ship without a pad token; reuse EOS so that
                # padding during generation is well defined.
                self.tokenizer.pad_token = self.tokenizer.eos_token

            # Base model, sharded across available devices by accelerate.
            print("加载基础模型...")
            base_model = AutoModelForCausalLM.from_pretrained(
                str(self.base_model_path),
                torch_dtype=torch.float16,
                device_map="auto",
                trust_remote_code=True
            )

            # Attach the LoRA adapter when present; otherwise serve the base
            # model so the service still works without fine-tuned weights.
            if self.model_path.exists():
                print("加载LoRA微调适配器...")
                self.model = PeftModel.from_pretrained(
                    base_model,
                    str(self.model_path),
                    torch_dtype=torch.float16
                )
            else:
                print("⚠️ 未找到微调模型，使用基础模型")
                self.model = base_model

            self.model.eval()

            # RAG is optional: only enabled when a vector store exists on disk.
            if self.rag_vector_path.exists():
                print("初始化RAG检索系统...")
                self.rag_builder = UniversityRAGBuilder(
                    rag_data_path="./go-llm-cleaner/go_cleaned_data",
                    vector_store_path=str(self.rag_vector_path)
                )
                self.rag_builder.load_existing_vectorstore()
                print("✅ RAG检索系统初始化完成")
            else:
                print("⚠️ 未找到向量数据库，禁用RAG功能")

            self.is_ready = True
            print("✅ 服务初始化完成！")

        except Exception as e:
            print(f"❌ 服务初始化失败: {str(e)}")
            raise

    def retrieve_context(self, question: str, k: int = 3) -> List[str]:
        """Return up to *k* passages relevant to *question* from the vector store.

        Returns an empty list when RAG is disabled or the search fails —
        retrieval is best-effort and must never break answer generation.
        """
        if not self.rag_builder or not self.rag_builder.vectorstore:
            return []

        try:
            results = self.rag_builder.vectorstore.similarity_search(
                question, k=k
            )
            return [doc.page_content for doc in results]

        except Exception as e:
            # Best-effort: log and fall back to generation without context.
            print(f"RAG检索错误: {str(e)}")
            return []

    def generate_answer(
        self,
        question: str,
        contexts: Optional[List[str]] = None,
        max_tokens: int = 256,
        temperature: float = 0.7,
        top_p: float = 0.9
    ) -> str:
        """Generate an answer for *question*, optionally grounded in *contexts*.

        Args:
            question: user question.
            contexts: optional retrieved passages to prepend to the prompt.
            max_tokens: cap on newly generated tokens.
            temperature: sampling temperature.
            top_p: nucleus-sampling cutoff.

        Returns:
            The decoded answer text (generated tokens only).
        """
        system_prompt = "你是佛山大学的智能问答助手。请根据提供的信息准确、友好地回答用户关于佛山大学的问题。回答要简洁明了，重点突出。"

        # Inline the retrieved passages ahead of the question when available.
        user_input = question
        if contexts:
            context_text = "\n".join([f"参考信息{i+1}: {ctx}" for i, ctx in enumerate(contexts)])
            user_input = f"参考以下信息回答问题：\n{context_text}\n\n问题：{question}"

        # ChatML-style prompt for Qwen.
        # NOTE(review): consider tokenizer.apply_chat_template instead of a
        # hand-built template — verify against the model's chat format.
        full_prompt = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_input}<|im_end|>\n<|im_start|>assistant\n"

        inputs = self.tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=1024)
        if hasattr(self.model, 'device'):
            inputs = {k: v.to(self.model.device) for k, v in inputs.items()}

        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                do_sample=True,
                temperature=temperature,
                top_p=top_p,
                pad_token_id=self.tokenizer.eos_token_id,
                eos_token_id=self.tokenizer.eos_token_id,
                repetition_penalty=1.1
            )

        # FIX: decode only the newly generated tokens. The previous code
        # decoded the full sequence with skip_special_tokens=True, which
        # strips "<|im_start|>" — so splitting on "<|im_start|>assistant\n"
        # never matched and the prompt leaked into the returned answer.
        prompt_length = inputs["input_ids"].shape[1]
        generated_tokens = outputs[0][prompt_length:]
        answer = self.tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()

        return answer

# Module-level service singleton shared by all endpoints; heavy model
# loading is deferred to initialize() at application startup.
qa_service = UniversityQAService()

@app.on_event("startup")
async def startup_event():
    """服务启动时初始化"""
    await qa_service.initialize()

@app.get("/")
async def root():
    """根路径"""
    return {
        "message": "佛山大学智能问答API服务",
        "version": "1.0.0",
        "status": "ready" if qa_service.is_ready else "initializing"
    }

@app.get("/health")
async def health_check():
    """健康检查"""
    return {
        "status": "healthy" if qa_service.is_ready else "initializing",
        "model_loaded": qa_service.model is not None,
        "rag_enabled": qa_service.rag_builder is not None,
        "timestamp": datetime.now().isoformat()
    }

@app.post("/query", response_model=QueryResponse)
async def query(request: QueryRequest):
    """问答接口"""
    if not qa_service.is_ready:
        raise HTTPException(status_code=503, detail="服务正在初始化中，请稍后重试")
    
    start_time = datetime.now()
    
    try:
        # 检索相关上下文
        contexts = []
        if request.use_rag and qa_service.rag_builder:
            contexts = qa_service.retrieve_context(request.question, k=3)
        
        # 生成回答
        answer = qa_service.generate_answer(
            question=request.question,
            contexts=contexts,
            max_tokens=request.max_tokens,
            temperature=request.temperature,
            top_p=request.top_p
        )
        
        # 计算处理时间
        processing_time = (datetime.now() - start_time).total_seconds()
        
        return QueryResponse(
            answer=answer,
            confidence=0.8,  # 简化的置信度
            retrieved_contexts=contexts,
            processing_time=processing_time,
            model_info="Qwen3-4B-LoRA + RAG"
        )
        
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"处理请求时出错: {str(e)}")

@app.get("/stats")
async def get_stats():
    """获取服务统计信息"""
    stats = {
        "service_status": "ready" if qa_service.is_ready else "initializing",
        "model_info": {
            "base_model": "Qwen3-4B-Instruct-2507",
            "lora_enabled": qa_service.model_path.exists(),
            "model_size": "4B parameters"
        }
    }
    
    # RAG统计
    if qa_service.rag_builder:
        rag_stats = qa_service.rag_builder.get_retrieval_stats()
        if rag_stats:
            stats["rag_info"] = rag_stats
    
    return stats

@app.post("/test")
async def test_qa():
    """测试问答功能"""
    test_questions = [
        "佛山大学的图书馆开放时间是什么？",
        "如何申请国家基金？",
        "学校有哪些学院？",
        "电子信息工程学院的联系方式是什么？"
    ]
    
    results = []
    for question in test_questions:
        try:
            contexts = qa_service.retrieve_context(question, k=2) if qa_service.rag_builder else []
            answer = qa_service.generate_answer(question, contexts, max_tokens=150)
            
            results.append({
                "question": question,
                "answer": answer,
                "contexts_count": len(contexts)
            })
        except Exception as e:
            results.append({
                "question": question,
                "error": str(e)
            })
    
    return {"test_results": results}

if __name__ == "__main__":
    print("🚀 启动佛山大学问答API服务...")
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8000,
        reload=False
    )