from flask import Flask, request, jsonify
from flask_cors import CORS
import os
import json
import numpy as np
import torch
import faiss  # 导入faiss库
from transformers import AutoTokenizer, AutoModel
from llm_integration import LLMService
from relevance_classifier import RobertaRelevanceClassifier  # 导入RoBERTa相关性分类器
import logging  # 添加logging模块
import time  # 添加time模块

# Global state: path of the marker file recording that the vector index was built once
INDEX_BUILT_FLAG_FILE = os.path.join(os.path.dirname(__file__), "data", "index_built.flag")

# Has the vector index ever been built?
def is_index_built():
    """Return True when the index-built marker file exists on disk."""
    flag_path = INDEX_BUILT_FLAG_FILE
    return os.path.exists(flag_path)

# Record that the index has been built.
def mark_index_built():
    """Create (or overwrite) the marker file noting a completed index build."""
    with open(INDEX_BUILT_FLAG_FILE, 'w') as flag_file:
        flag_file.write('1')

# Configure logging for the whole module
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("app")

app = Flask(__name__)
CORS(app)  # enable cross-origin requests (frontend served from another origin)

# Path of the JSON knowledge-base file
KNOWLEDGE_FILE = os.path.join(os.path.dirname(__file__), "data", "knowledge.json")

# Make sure the data directory exists before anything reads/writes in it
os.makedirs(os.path.join(os.path.dirname(__file__), "data"), exist_ok=True)

# Knowledge base class
class KnowledgeBase:
    """Simple JSON-file-backed knowledge store.

    Entries are dicts of the form {"id": int, "text": str}, kept in memory
    and persisted explicitly via save().
    """

    def __init__(self, knowledge_file):
        """Load entries from `knowledge_file` if it exists; start empty otherwise."""
        self.knowledge_data = []
        if os.path.exists(knowledge_file):
            with open(knowledge_file, 'r', encoding='utf-8') as f:
                self.knowledge_data = json.load(f)

    def get_knowledge_texts(self):
        """Return the text of every entry, in storage order."""
        return [entry["text"] for entry in self.knowledge_data]

    def get_entry_by_index(self, index):
        """Return the entry at positional `index`, or {} when out of range."""
        if 0 <= index < len(self.knowledge_data):
            return self.knowledge_data[index]
        return {}

    def add_entry(self, text):
        """Append a new entry and return its id.

        BUGFIX: id is max(existing ids) + 1 instead of len + 1, so ids stay
        unique even when the backing file contains non-sequential ids.
        For sequentially-numbered data this yields the same ids as before.
        """
        new_id = max((entry.get("id", 0) for entry in self.knowledge_data), default=0) + 1
        self.knowledge_data.append({"id": new_id, "text": text})
        return new_id

    def save(self, file_path):
        """Persist all entries to `file_path` as pretty-printed UTF-8 JSON."""
        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(self.knowledge_data, f, ensure_ascii=False, indent=2)

# Retriever class (vector retrieval via FAISS)
class Retriever:
    """Dense retriever.

    Encodes texts with a HuggingFace embedding model, mean-pools the token
    embeddings, L2-normalizes them, and indexes them in a FAISS inner-product
    index (inner product on unit vectors equals cosine similarity).
    """

    def __init__(self, model_name="moka-ai/m3e-base"):
        """Load tokenizer + model (locally cached) and prepare FAISS resources.

        Re-raises any exception from model loading after logging it.
        """
        self.model_name = model_name

        # Cache model weights next to this file to avoid re-downloading each run
        cache_dir = os.path.join(os.path.dirname(__file__), "models_cache")
        logger.info(f"开始加载模型 '{model_name}' (使用缓存目录: {cache_dir})")

        try:
            self.tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
            logger.info("Tokenizer加载完成")

            self.model = AutoModel.from_pretrained(model_name, cache_dir=cache_dir)
            logger.info("模型权重加载完成")

            # Pick GPU when available, otherwise fall back to CPU
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            logger.info(f"使用设备: {self.device}")

            # Move the model to the chosen device and freeze it for inference
            self.model = self.model.to(self.device)
            self.model.eval()
            logger.info(f"模型已设置为评估模式并移至{self.device}设备")

            # Initialize FAISS GPU resources when CUDA is available
            self.use_gpu = torch.cuda.is_available()
            if self.use_gpu:
                logger.info("初始化FAISS GPU资源")
                self.gpu_resources = faiss.StandardGpuResources()
                logger.info("FAISS GPU资源初始化完成")
            else:
                logger.info("未检测到GPU，将使用CPU进行FAISS索引")

            self.index = None  # FAISS index; built lazily by build_index()
            self.knowledge_texts = []  # texts backing the index, in index order
            self.embeddings_matrix = None  # float32 matrix of normalized embeddings

        except Exception as e:
            logger.error(f"模型加载失败: {str(e)}")
            raise

    def _mean_pooling(self, model_output, attention_mask):
        """Mean-pool token embeddings, ignoring padding positions via the mask."""
        token_embeddings = model_output[0]
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, dim=1)
        # Clamp to avoid division by zero for all-padding rows
        sum_mask = torch.clamp(input_mask_expanded.sum(dim=1), min=1e-9)
        return sum_embeddings / sum_mask
        
    def _normalize_vector(self, vector):
        """Scale `vector` to unit L2 norm; zero vectors are returned unchanged."""
        norm = np.linalg.norm(vector)
        if norm > 0:
            return vector / norm
        return vector

    def build_index(self, knowledge_texts):
        """Encode `knowledge_texts` in batches and build the FAISS index.

        No-op (with a warning) when the knowledge base is empty.
        """
        self.knowledge_texts = knowledge_texts
        
        # Nothing to index for an empty knowledge base
        if not knowledge_texts:
            logger.warning("知识库为空，无法构建索引")
            return
            
        # Progress logging
        total_entries = len(knowledge_texts)
        logger.info(f"开始处理 {total_entries} 条知识条目的向量化")
        
        # Encode in batches rather than one text at a time
        batch_size = 32  # tune to available GPU memory
        embeddings = []
        
        for i in range(0, total_entries, batch_size):
            batch_texts = knowledge_texts[i:i+batch_size]
            current_batch = i//batch_size + 1
            total_batches = (total_entries-1)//batch_size + 1
            logger.info(f"处理批次 {current_batch}/{total_batches} (条目 {i+1}-{min(i+batch_size, total_entries)})")
            
            inputs = self.tokenizer(batch_texts, padding=True, truncation=True, 
                                  max_length=128, return_tensors='pt')
            if self.use_gpu:
                inputs = {k: v.to(self.device) for k, v in inputs.items()}
            
            with torch.no_grad():
                outputs = self.model(**inputs)
                
            # Batched mean pooling over token embeddings
            pooled_embeddings = self._mean_pooling(outputs, inputs['attention_mask'])
            batch_embeddings = pooled_embeddings.cpu().numpy()
            
            # Normalize every vector before indexing (enables cosine via dot product)
            for emb in batch_embeddings:
                embeddings.append(self._normalize_vector(emb))
        
        logger.info("向量化处理完成，正在构建FAISS索引")
        self.embeddings_matrix = np.array(embeddings).astype('float32')
        
        # Vectors are normalized, so inner product == cosine similarity
        dimension = self.embeddings_matrix.shape[1]  # embedding dimensionality
        
        # Build the index on CPU first
        cpu_index = faiss.IndexFlatIP(dimension)  # exact inner-product (dot) search
        
        # Move the index to GPU when available
        if self.use_gpu:
            logger.info(f"将FAISS索引转移到GPU，向量维度: {dimension}")
            self.index = faiss.index_cpu_to_gpu(self.gpu_resources, 0, cpu_index)
        else:
            self.index = cpu_index
            
        # Add all vectors to the index
        logger.info("正在将向量添加到FAISS索引...")
        self.index.add(self.embeddings_matrix)  # add embeddings to the index
        logger.info(f"FAISS索引构建完成，包含 {len(self.knowledge_texts)} 条知识条目")

    def retrieve(self, query, top_k=3):
        """Return (indices, scores) of the `top_k` most similar knowledge texts.

        Scores are cosine similarities rescaled from [-1, 1] into [0, 1].
        Raises ValueError if build_index() has not been called yet.
        """
        if self.index is None:
            raise ValueError("Index not built yet.")
        
        # Guard against an empty knowledge base
        if not self.knowledge_texts:
            logger.warning("知识库为空，无法进行检索")
            return [], []
            
        logger.info(f"开始检索与问题相关的知识：'{query}'")
        
        start_time = time.time()  # time the whole retrieval
        inputs = self.tokenizer(query, return_tensors='pt', truncation=True, max_length=128)
        # Move inputs to the GPU when available
        if self.use_gpu:
            inputs = {k: v.to(self.device) for k, v in inputs.items()}
            
        with torch.no_grad():
            outputs = self.model(**inputs)
        query_emb = self._mean_pooling(outputs, inputs['attention_mask'])
        query_emb_np = query_emb.cpu().numpy().astype('float32')
        # Normalize the query vector to match the indexed vectors
        query_emb_np = self._normalize_vector(query_emb_np)

        # FAISS vector search
        logger.info(f"执行FAISS向量检索，检索Top-{top_k}个相关结果")
        scores, indices = self.index.search(query_emb_np, min(top_k, len(self.knowledge_texts)))
        
        # Rescale scores from [-1, 1] (cosine range) into the more intuitive [0, 1]
        normalized_scores = [(score + 1) / 2 for score in scores[0]]
        
        retrieval_time = time.time() - start_time
        logger.info(f"检索完成，耗时 {retrieval_time:.2f} 秒")
        
        # Return plain Python lists
        return indices[0].tolist(), normalized_scores

# Initialize the knowledge base, retriever and LLM service at import time
logger.info("正在初始化知识库...")
kb = KnowledgeBase(KNOWLEDGE_FILE)
total_knowledge = len(kb.get_knowledge_texts())
logger.info(f"知识库加载完成，共有 {total_knowledge} 条知识条目")

# Use a local model cache directory so weights are not re-downloaded per run
cache_dir = os.path.join(os.path.dirname(__file__), "models_cache")
os.makedirs(cache_dir, exist_ok=True)
logger.info(f"使用模型缓存目录: {cache_dir}")

logger.info("正在初始化检索器...")
retriever = Retriever(model_name="moka-ai/m3e-base")
logger.info("检索器初始化完成，开始构建向量索引...")

# Initialize the RoBERTa relevance classifier (re-ranks FAISS candidates)
logger.info("正在初始化RoBERTa相关性分类器...")
relevance_classifier = RobertaRelevanceClassifier(cache_dir=cache_dir)
logger.info("RoBERTa相关性分类器初始化完成")

# Build the index only on first-ever startup. NOTE(review): the flag file
# persists across restarts while the FAISS index lives in memory only, so
# after a restart the index is None here; the /api/query handler detects
# that and rebuilds on demand.
if not is_index_built():
    retriever.build_index(kb.get_knowledge_texts())
    mark_index_built()
    logger.info("向量索引首次构建完成")

logger.info("正在初始化LLM服务...")
llm_service = LLMService()
logger.info("LLM服务初始化完成")

@app.route('/api/query', methods=['POST'])
def query():
    """Answer a user question via retrieval-augmented generation.

    Expects JSON {"question": str, "top_k": int (optional, default 3)}.
    Over-retrieves candidates with FAISS, re-ranks them with the RoBERTa
    relevance classifier, then asks the LLM to answer from the top-k texts.
    Returns the question, the selected knowledge entries with both score
    kinds, and the generated answer; 400 on missing question, 500 on error.
    """
    # Lazily (re)build the in-memory FAISS index: the on-disk flag survives
    # restarts but the index itself does not, so check both.
    if not is_index_built() or retriever.index is None:
        logger.info("索引尚未构建，正在构建...")
        retriever.build_index(kb.get_knowledge_texts())
        mark_index_built()
    else:
        logger.info("使用现有索引进行检索")
    
    data = request.json
    if not data or 'question' not in data:
        return jsonify({"error": "请提供问题"}), 400
    
    question = data['question']
    top_k = data.get('top_k', 3)
    
    try:
        logger.info(f"用户问题: {question}")
        
        # Over-retrieve (3x) so the classifier has enough candidates to filter
        indices, scores = retriever.retrieve(question, top_k=top_k * 3)
        results = []
        
        logger.info(f"检索到 {len(indices)} 条相关知识:")
        
        # Build the candidate document list for relevance classification
        candidate_docs = []
        for idx, score in zip(indices, scores):
            entry = kb.get_entry_by_index(idx)
            if entry:
                candidate_docs.append({
                    "id": entry.get("id"),
                    "text": entry.get("text"),
                    "faiss_score": score,
                    "faiss_percentage": round(score * 100)
                })
                logger.info(f"  候选知识ID: {entry.get('id')}, FAISS相关性分数: {round(score * 100)}%")
        
        # Filter and re-rank the candidates with the RoBERTa classifier
        logger.info("使用RoBERTa分类器进行深度相关性分析...")
        scored_docs = relevance_classifier.compute_relevance(question, candidate_docs, top_k=top_k)
        
        knowledge_texts = []
        for doc in scored_docs:
            knowledge_texts.append(doc.get("text"))
            # Carry both the FAISS and RoBERTa scores into the response
            results.append({
                "id": doc.get("id"),
                "text": doc.get("text"),
                "faiss_score": doc.get("faiss_score"),
                "faiss_percentage": doc.get("faiss_percentage"),
                "relevance_score": doc.get("roberta_score"),
                "relevance_percentage": doc.get("roberta_score_percentage")
            })
            logger.info(f"  精选知识ID: {doc.get('id')}, RoBERTa相关性分数: {doc.get('roberta_score_percentage')}%")
            logger.info(f"  内容: {doc.get('text')}")
        
        logger.info(f"传递给LLM的知识条目数量: {len(knowledge_texts)}")
        
        # Ask the LLM for the final answer, grounded in the selected texts
        llm_response = llm_service.generate_response(question, knowledge_texts)
        
        # BUGFIX: the old one-liner applied the ternary to the whole f-string,
        # dropping the "LLM回答:" prefix for responses <= 100 chars. Build the
        # preview first, then log it uniformly.
        preview = f"{llm_response[:100]}..." if len(llm_response) > 100 else llm_response
        logger.info(f"LLM回答: {preview}")
        
        return jsonify({
            "question": question,
            "retrieved_knowledge": results,
            "answer": llm_response
        })
    except Exception as e:
        logger.error(f"处理查询时出错: {str(e)}")
        return jsonify({"error": str(e)}), 500

@app.route('/api/knowledge', methods=['GET'])
def get_knowledge():
    """Return every knowledge-base entry as a JSON array."""
    entries = kb.knowledge_data
    return jsonify(entries)

@app.route('/api/knowledge', methods=['POST'])
def add_knowledge():
    """Add one knowledge entry, persist the store, and rebuild the index."""
    payload = request.json
    if not payload or 'text' not in payload:
        return jsonify({"error": "请提供知识文本"}), 400

    text = payload['text']
    new_id = kb.add_entry(text)
    kb.save(KNOWLEDGE_FILE)

    # The knowledge base changed, so the vector index must be rebuilt
    logger.info("知识库更新，重新构建索引...")
    retriever.build_index(kb.get_knowledge_texts())
    mark_index_built()

    return jsonify({"id": new_id, "text": text})

@app.route('/api/llm/config', methods=['GET'])
def get_llm_config():
    """Expose only the LLM parameters the frontend is allowed to see."""
    full_config = llm_service.get_config()
    exposed = {}
    exposed["max_tokens"] = full_config.get("max_tokens", 1000)
    exposed["temperature"] = full_config.get("temperature", 0.1)
    return jsonify(exposed)

@app.route('/api/llm/config', methods=['POST'])
def update_llm_config():
    """Update the LLM config; only max_tokens and temperature may be changed.

    Returns {"success": True, "config": {max_tokens, temperature}} with the
    values now in effect; 400 when no JSON body is supplied.
    """
    data = request.json
    if not data:
        return jsonify({"error": "请提供配置信息"}), 400

    # Whitelist the parameters the frontend is allowed to modify
    allowed_params = {
        k: v for k, v in data.items()
        if k in ["max_tokens", "temperature"]
    }

    # Merge the allowed changes into the current config and apply it
    current_config = llm_service.get_config()
    current_config.update(allowed_params)
    llm_service.update_config(current_config)

    # BUGFIX: previously this invoked the GET view function and read `.json`
    # off the Flask Response object — a fragile hack. Build the same filtered
    # payload directly from the updated config instead.
    filtered_config = {
        "max_tokens": current_config.get("max_tokens", 1000),
        "temperature": current_config.get("temperature", 0.1)
    }
    return jsonify({"success": True, "config": filtered_config})

@app.route('/api/llm/models', methods=['GET'])
def get_available_models():
    """List the models available locally to the LLM service."""
    try:
        model_list = llm_service.get_available_models()
        return jsonify(model_list)
    except Exception as e:
        # Surface the failure to the frontend as a 500 with the reason
        return jsonify({"error": f"获取模型列表失败: {str(e)}"}), 500

if __name__ == '__main__':
    # NOTE(review): debug=True and binding to 0.0.0.0 are development-only
    # settings; use a production WSGI server (e.g. gunicorn) for deployment.
    app.run(debug=True, host='0.0.0.0')