import asyncio
import json
import os
from typing import List, Dict, Any, Tuple, Optional
import numpy as np
from kg_retrieval.env_config import MODEL_CONFIG

class OptimizedEmbedding:
    """Text-embedding wrapper supporting BGE-M3 and SentenceTransformer backends.

    Resolves its configuration from ``MODEL_CONFIG`` (environment-backed) with
    constructor overrides. When running in offline mode, or when the backend
    libraries are missing / fail to load, it degrades to random vectors so the
    rest of the pipeline keeps working in development and testing.
    """

    def __init__(self, model_path=None, use_fp16=None, model_type=None):
        """Initialize the embedding backend.

        Args:
            model_path: Local checkpoint path or HuggingFace model id.
                Defaults to the per-backend path in ``MODEL_CONFIG``.
            use_fp16: Force FP16 on/off; ``None`` means use the
                ``MODEL_CONFIG`` default for the selected backend.
            model_type: ``"bge"`` or ``"sentence_transformer"``; defaults to
                ``MODEL_CONFIG["EMBEDDING_MODEL_TYPE"]``.
        """
        # Resolve settings from arguments or environment-backed config.
        self.model_type = model_type or MODEL_CONFIG["EMBEDDING_MODEL_TYPE"]
        self.offline_mode = MODEL_CONFIG["OFFLINE_MODE"]

        # Configure HuggingFace hub behavior before any model download.
        os.environ["HF_ENDPOINT"] = MODEL_CONFIG["HF_ENDPOINT"]
        os.environ["HF_HUB_DISABLE_XET"] = MODEL_CONFIG["HF_HUB_DISABLE_XET"]

        # Offline mode: skip model loading entirely; random vectors are used.
        if self.offline_mode:
            print(f"运行在离线模式，使用随机向量替代实际嵌入")
            self.emb_model = None
            self.embedding_dim = 1024  # default dimension
            return

        try:
            if self.model_type == "bge":
                self._load_bge(model_path, use_fp16)
            # Deliberately NOT elif: _load_bge downgrades model_type to
            # "sentence_transformer" when FlagEmbedding is not installed,
            # and we then fall through to that loader.
            if self.model_type == "sentence_transformer":
                self._load_sentence_transformer(model_path, use_fp16)
        except Exception as e:
            # Any load failure degrades to the random-vector stand-in.
            print(f"模型加载失败: {e}，将使用随机向量替代")
            self.emb_model = None
            self.embedding_dim = 1024  # default dimension

    def _load_bge(self, model_path, use_fp16):
        """Load a BGE-M3 model via FlagEmbedding.

        On ImportError, switches ``self.model_type`` to
        ``"sentence_transformer"`` so __init__ can try the other backend.
        """
        self.model_path = model_path or MODEL_CONFIG["BGE_MODEL_PATH"]
        self.use_fp16 = use_fp16 if use_fp16 is not None else MODEL_CONFIG["BGE_MODEL_USE_FP16"]
        print(f"加载BGE嵌入模型: {self.model_path}")

        try:
            from FlagEmbedding import BGEM3FlagModel
            if os.path.exists(self.model_path):
                # Local checkpoint directory needs trust_remote_code.
                self.emb_model = BGEM3FlagModel(self.model_path, use_fp16=self.use_fp16, trust_remote_code=True)
            else:
                # HuggingFace model id.
                self.emb_model = BGEM3FlagModel(self.model_path, use_fp16=self.use_fp16)
            self.embedding_dim = 1024  # BGE-M3 default dimension
        except ImportError:
            print("警告：FlagEmbedding未安装，将尝试使用SentenceTransformer")
            self.model_type = "sentence_transformer"

    def _load_sentence_transformer(self, model_path, use_fp16):
        """Load a SentenceTransformer model and probe its output dimension.

        On ImportError, leaves ``emb_model`` as ``None`` so random vectors
        are used instead.
        """
        self.model_path = model_path or MODEL_CONFIG["ST_MODEL_PATH"]
        self.use_fp16 = use_fp16 if use_fp16 is not None else MODEL_CONFIG["ST_MODEL_USE_FP16"]
        print(f"加载SentenceTransformer嵌入模型: {self.model_path}")

        try:
            from sentence_transformers import SentenceTransformer
            self.emb_model = SentenceTransformer(self.model_path)

            # Probe the output dimension with a throwaway encode call.
            test_embedding = self.emb_model.encode(["测试维度"])[0]
            self.embedding_dim = len(test_embedding)
            print(f"嵌入维度: {self.embedding_dim}")

            # Switch to FP16 if configured; best effort only.
            if self.use_fp16:
                try:
                    self.emb_model.half()
                    print("已启用FP16模式")
                except Exception:  # was a bare except; narrowed
                    print("无法启用FP16模式，继续使用默认精度")
        except ImportError:
            print("警告：sentence_transformers未安装，将使用随机向量替代")
            self.emb_model = None
            self.embedding_dim = 1536  # Qwen3 default dimension

    @classmethod
    def preload(cls):
        """Create an instance and warm it up with one encode call.

        Intended for application startup so the first real request does not
        pay the model-initialization cost. Returns the instance either way.
        """
        instance = cls()
        if instance.emb_model:
            try:
                # Both backends expose the same encode() entry point, so a
                # single warm-up call covers bge and sentence_transformer
                # (the original if/elif branches were identical).
                test_text = "测试文本嵌入预热"
                instance.emb_model.encode([test_text])
                print(f"嵌入模型预热完成: {instance.model_path}")
            except Exception as e:
                print(f"嵌入模型预热失败: {e}")
        return instance

    def _random_vectors(self, count: int) -> List[List[float]]:
        """Return *count* random vectors of the configured dimension.

        Development/testing stand-in used whenever no real model is available.
        """
        return [np.random.rand(self.embedding_dim).tolist() for _ in range(count)]

    async def batch_embed(self, texts: List[str]) -> List:
        """Embed a batch of texts in one call.

        Args:
            texts: Texts to embed; an empty list short-circuits to ``[]``.

        Returns:
            One vector (list of floats) per input text. Random vectors are
            returned when no model is loaded or encoding fails.
        """
        if not texts:
            return []

        if self.emb_model is None:
            # No model available (offline mode or load failure).
            return self._random_vectors(len(texts))

        try:
            if self.model_type == "bge":
                # BGE-M3 encode() returns a dict; dense embeddings live
                # under 'dense_vecs'.
                return self.emb_model.encode(texts)['dense_vecs'].tolist()
            elif self.model_type == "sentence_transformer":
                vectors = self.emb_model.encode(texts)
                return vectors.tolist() if isinstance(vectors, np.ndarray) else vectors
            # Unknown backend type: degrade to random vectors.
            print(f"未知模型类型: {self.model_type}，使用随机向量")
            return self._random_vectors(len(texts))
        except Exception as e:
            print(f"嵌入生成错误: {e}，使用随机向量")
            return self._random_vectors(len(texts))

    async def process_file_content(self, content: str, file_name: str, file_url: str, chat_model=None) -> Dict[str, Any]:
        """Build a searchable document record for a file in one pass.

        Uses *chat_model* (an async callable) to generate title, summary,
        keywords and three simulated queries, then embeds them in a single
        batch. Falls back to :meth:`_fallback_process_content` when no chat
        model is given or its output is unusable.
        """
        if chat_model is None:
            # No LLM available: use the heuristic fallback.
            return await self._fallback_process_content(content, file_name, file_url)

        batch_prompt = f"""
        请根据以下文件内容，同时生成标题、摘要、关键词列表和3个模拟查询：
        
        文件名称：{file_name}
        文件路径：{file_url}
        文件内容：{content[:2000]}...
        
        请按照以下JSON格式返回所有内容：
        {{
            "title": "简短精炼的文件标题，不超过50字",
            "summary": "详细的文件摘要，包含关键信息",
            "keywords": ["关键词1", "关键词2", "关键词3", "关键词4", "关键词5"],
            "queries": [
                "模拟用户可能的查询问题1",
                "模拟用户可能的查询问题2",
                "模拟用户可能的查询问题3"
            ]
        }}
        """

        try:
            result = await chat_model(prompt=batch_prompt, query="一次性生成所有文件描述内容")
            processed_result = json.loads(result)
            # Validate the LLM payload while still inside the handler: a
            # missing key or a short query list previously raised OUTSIDE
            # the try and bypassed the designed fallback.
            title = processed_result["title"]
            summary = processed_result["summary"]
            keywords = processed_result["keywords"]
            queries = [processed_result["queries"][i] for i in range(3)]
        except Exception as e:
            print(f"批量生成内容失败: {e}")
            # Fall back to simple heuristic content generation.
            return await self._fallback_process_content(content, file_name, file_url)

        # One batched embedding call for title, summary and the 3 queries.
        embeddings = await self.batch_embed([title, summary] + queries)

        return self._assemble_record(file_name, file_url, keywords, title, summary, queries, embeddings)

    async def _fallback_process_content(self, content: str, file_name: str, file_url: str) -> Dict[str, Any]:
        """Heuristic record builder used when no LLM is available.

        Derives title/summary/keywords/queries from the file name, URL and a
        content prefix, then embeds them in one batch.
        """
        # First 150 characters serve as the summary.
        summary = content[:150] + "..." if len(content) > 150 else content

        # File-name stem as title keyword; parent directory as a second one.
        stem = file_name.split('.')[0]
        keywords = [stem]
        if '/' in file_url:
            keywords.append(file_url.split('/')[-2])

        # Simulated user queries.
        queries = [
            f"查找{file_name}文件",
            f"搜索{stem}相关内容",
            f"查询{file_name.split('.')[-1]}类型文件"
        ]

        # One batched embedding call: title, summary, then the 3 queries.
        embeddings = await self.batch_embed([file_name, summary] + queries)

        return self._assemble_record(file_name, file_url, keywords, file_name, summary, queries, embeddings)

    def _assemble_record(self, file_name: str, file_url: str, keywords: List[str],
                         title: str, summary: str, queries: List[str],
                         embeddings: List) -> Dict[str, Any]:
        """Assemble the output document dict.

        Shared by the LLM path and the fallback path (the two previously
        duplicated this literal verbatim). ``embeddings`` is ordered:
        title, summary, query_1, query_2, query_3.
        """
        return {
            "filename": file_name,
            "file_url": file_url,
            "suffix": file_url.split('.')[-1] if '.' in file_url else "",
            "fileclass": file_url.split('/')[-2] if '/' in file_url else "",
            "keywords": keywords,
            "title": title,
            "title_vector": embeddings[0],
            "summary_text": summary,
            "summary_text_vector": embeddings[1],
            "query_1": queries[0],
            "query_vector_1": embeddings[2],
            "query_2": queries[1],
            "query_vector_2": embeddings[3],
            "query_3": queries[2],
            "query_vector_3": embeddings[4],
            "file_status": '0',
            "company": '',
            "model_type": self.model_type,
            "embedding_dim": self.embedding_dim
        }

    def _get_model_info(self) -> Dict[str, Any]:
        """Return a summary of the current model configuration."""
        info = {
            "model_type": self.model_type,
            "model_path": getattr(self, "model_path", None),
            "embedding_dim": self.embedding_dim,
            "use_fp16": getattr(self, "use_fp16", None),
            "offline_mode": self.offline_mode,
        }

        # Both backend types report identical details (the two original
        # branches were byte-for-byte the same), so they are merged here.
        if self.model_type in ("bge", "sentence_transformer") and getattr(self, "emb_model", None):
            info["model_details"] = {
                "model_name": self.model_path.split("/")[-1] if "/" in self.model_path else self.model_path,
                "initialized": True
            }

        return info