import os
import uuid
import zlib
from collections import Counter
from typing import List

import chromadb
import httpx
import numpy as np
from chromadb.config import Settings
from dotenv import load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader, TextLoader, Docx2txtLoader
from openai import OpenAI

# Globally disable CoreML and tokenizer parallelism (works around macOS
# CoreML issues and HuggingFace tokenizer fork warnings).
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["CHROMA_NO_COREML"] = "1"
os.environ["USE_ONNX"] = "1"
os.environ["COREML_ENABLE_CPU_ONLY"] = "1"

class CustomHTTPClient(httpx.Client):
    """httpx.Client wrapper that silently drops the ``proxies`` keyword.

    Some OpenAI-SDK versions forward a ``proxies`` kwarg that newer httpx
    releases no longer accept; stripping it here avoids a TypeError while
    passing every other argument straight through.
    """

    def __init__(self, **kwargs):
        # Discard the unsupported argument (no-op if absent), keep the rest.
        kwargs.pop('proxies', None)
        super().__init__(**kwargs)

# Load environment variables from a .env file, if one is present.
load_dotenv()

# Vector store management built on ChromaDB.
class VectorStoreManager:
    """Manage a ChromaDB vector store.

    Responsibilities: loading documents (PDF/TXT/DOCX), splitting them into
    chunks, generating embeddings (external DeepSeek/OpenAI-compatible API
    with local fallbacks), and collection CRUD plus similarity search.
    """

    def __init__(self, persist_directory="data/chroma_db"):
        """Initialize the ChromaDB client and the external embedding client.

        Args:
            persist_directory: Directory where ChromaDB persists its data.
        """
        # Ensure the persistence directory exists.
        os.makedirs(persist_directory, exist_ok=True)

        # Modern ChromaDB configures telemetry via a Settings object passed
        # to PersistentClient; the old chroma_db_impl / anonymized_telemetry
        # keyword arguments are not accepted directly and would raise.
        try:
            self.client = chromadb.PersistentClient(
                path=persist_directory,
                settings=Settings(anonymized_telemetry=False),
            )
        except Exception as e:
            print(f"使用新版初始化方式失败，尝试旧版方式: {type(e).__name__}: {str(e)}")
            # Fall back to the plain initialization signature.
            try:
                self.client = chromadb.PersistentClient(path=persist_directory)
            except Exception as e2:
                print(f"使用旧版初始化方式也失败: {type(e2).__name__}: {str(e2)}")
                # Last resort: a non-persistent in-memory client.
                print("使用内存客户端作为备选方案")
                self.client = chromadb.Client()

        print(f"成功初始化向量存储客户端，路径: {persist_directory}")

        # OpenAI-compatible client used for external embedding generation.
        self.embedding_client = None
        self.use_external_embedding = False

        # Read embedding configuration from the environment (.env).
        env_embedding_model = os.getenv("EMBEDDING_MODEL")
        deepseek_embed_model = os.getenv("DEEPSEEK_EMBED_MODEL")

        # Model selection priority: DeepSeek-specific setting, then the
        # generic EMBEDDING_MODEL setting, then a hard-coded default.
        if deepseek_embed_model:
            self.embed_model = deepseek_embed_model
        elif env_embedding_model:
            self.embed_model = env_embedding_model
        else:
            self.embed_model = "deepseek-embed-text"

        deepseek_api_key = os.getenv("DEEPSEEK_API_KEY")
        deepseek_base_url = os.getenv("DEEPSEEK_BASE_URL", "https://api.deepseek.com/v1")

        print(f"使用的嵌入模型配置 - EMBEDDING_MODEL: {env_embedding_model}, DEEPSEEK_EMBED_MODEL: {deepseek_embed_model}")
        print(f"最终使用的嵌入模型: {self.embed_model}")
        print(f"DeepSeek API URL: {deepseek_base_url}")

        try:
            # Without an API key, external embeddings are unavailable.
            if not deepseek_api_key:
                print("警告: 未设置DeepSeek API密钥，将使用ChromaDB内置模型")
                self.use_external_embedding = False
            else:
                # Custom HTTP client strips the unsupported 'proxies' kwarg.
                custom_http_client = CustomHTTPClient(
                    base_url=deepseek_base_url,
                    timeout=30.0
                )

                self.embedding_client = OpenAI(
                    api_key=deepseek_api_key,
                    base_url=deepseek_base_url,
                    http_client=custom_http_client
                )
                print(f"成功初始化嵌入客户端")
                self.use_external_embedding = True
        except Exception as e:
            print(f"初始化嵌入客户端失败，将使用ChromaDB内置模型: {type(e).__name__}: {str(e)}")
            self._log_error_details(e)
            self.embedding_client = None
            self.use_external_embedding = False

        print("成功初始化向量存储管理器")

    @staticmethod
    def _log_error_details(e):
        """Print HTTP status code and JSON body from an API error, if present."""
        if hasattr(e, 'status_code'):
            print(f"HTTP状态码: {e.status_code}")
        if hasattr(e, 'response'):
            try:
                print(f"响应内容: {e.response.json()}")
            except Exception:
                # Response body is absent or not JSON; nothing more to log.
                pass

    def load_document(self, file_path):
        """Load a document file and return its LangChain document objects.

        Args:
            file_path: Path to a .pdf, .txt, .docx or .doc file.

        Returns:
            A list of LangChain Document objects.

        Raises:
            ValueError: If the file extension is not supported.
        """
        file_extension = os.path.splitext(file_path)[1].lower()

        if file_extension == ".pdf":
            loader = PyPDFLoader(file_path)
        elif file_extension == ".txt":
            loader = TextLoader(file_path, encoding="utf-8")
        elif file_extension in [".docx", ".doc"]:
            loader = Docx2txtLoader(file_path)
        else:
            raise ValueError(f"不支持的文件格式: {file_extension}")

        return loader.load()

    def split_documents(self, documents, chunk_size=1000, chunk_overlap=200):
        """Split documents into overlapping chunks.

        Args:
            documents: LangChain Document objects to split.
            chunk_size: Maximum characters per chunk.
            chunk_overlap: Characters shared between adjacent chunks.

        Returns:
            The list of split Document chunks.
        """
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            length_function=len,
        )
        return text_splitter.split_documents(documents)

    def generate_embeddings(self, texts):
        """Generate embedding vectors for the given texts.

        Args:
            texts: A list of strings (a single string is also accepted).

        Returns:
            A list of embedding vectors, or None to signal that ChromaDB
            should generate embeddings with its built-in model.
        """
        if self.use_external_embedding and self.embedding_client:
            try:
                print(f"使用外部API生成 {len(texts)} 个嵌入向量")
                print(f"调用参数: model={self.embed_model}, input长度={len(texts)}")

                # Normalize a single string into a one-element list.
                if isinstance(texts, str):
                    texts = [texts]

                # The DeepSeek API accepts at most 2048 texts per request.
                if len(texts) > 2048:
                    print(f"警告: 文本数量 {len(texts)} 超过API限制，将截断为2048个")
                    texts = texts[:2048]

                response = self.embedding_client.embeddings.create(
                    input=texts,
                    model=self.embed_model
                )

                print(f"嵌入API调用成功，返回了 {len(response.data)} 个向量")
                return [item.embedding for item in response.data]
            except Exception as e:
                print(f"生成嵌入向量时出错: {type(e).__name__}: {str(e)}")
                self._log_error_details(e)

                # Fall back to the simple local embedding scheme.
                try:
                    print("使用备选的简单嵌入方法")
                    embeddings = self._generate_simple_embeddings(texts)
                    print(f"成功生成 {len(embeddings)} 个简单嵌入向量")
                    return embeddings
                except Exception as fallback_error:
                    print(f"生成简单嵌入向量时出错: {fallback_error}")
                    # Every method failed; defer to ChromaDB's built-in model.
                    print("尝试让ChromaDB自动处理嵌入")
                    return None
        else:
            # No external client configured: defer to ChromaDB's built-in model.
            print(f"使用ChromaDB内置模型生成嵌入向量")
            return None

    def _generate_simple_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Fallback embedding based on normalized character frequencies.

        Characters are bucketed with a stable CRC32 hash rather than the
        built-in hash(), which is randomized per process (PYTHONHASHSEED) and
        would make persisted embeddings irreproducible across runs.

        Args:
            texts: Texts to embed.

        Returns:
            A list of L2-normalized 384-dimensional vectors (an empty text
            yields the zero vector).
        """
        embedding_dim = 384  # Matches the dimensionality of common embedding models.
        embeddings = []

        for text in texts:
            char_freq = Counter(text)

            embedding = [0.0] * embedding_dim
            # Accumulate each character's normalized frequency into a bucket
            # chosen by a stable hash of its UTF-8 bytes.
            for char, freq in char_freq.items():
                idx = zlib.crc32(char.encode("utf-8")) % embedding_dim
                embedding[idx] += freq / len(text)

            # L2-normalize (guard against the all-zero vector of empty text).
            norm = np.linalg.norm(embedding)
            if norm == 0:
                norm = 1
            embeddings.append((np.array(embedding) / norm).tolist())

        return embeddings

    def create_collection(self, collection_name):
        """Create a collection, or return it if it already exists.

        Returns:
            The collection object, or None if it could not be obtained.
        """
        try:
            collection = self.client.get_or_create_collection(
                name=collection_name,
                metadata={"description": f"Collection for {collection_name}"}
            )
            print(f"成功创建或获取集合: {collection_name}")
            return collection
        except Exception as e:
            print(f"创建或获取集合时出错: {type(e).__name__}: {str(e)}")
            # Degraded path: try a plain get in case creation is what failed.
            try:
                collection = self.client.get_collection(name=collection_name)
                print(f"降级成功，获取到集合: {collection_name}")
                return collection
            except Exception as e2:
                print(f"降级也失败: {type(e2).__name__}: {str(e2)}")
                return None

    def add_documents_to_collection(self, collection, documents, metadata_list=None):
        """Add LangChain documents (with embeddings) to a ChromaDB collection.

        Args:
            collection: Target ChromaDB collection.
            documents: LangChain Document objects to insert.
            metadata_list: Optional extra metadata dicts merged per document.

        Returns:
            True on success, False otherwise.
        """
        if not collection:
            print("错误: 集合未初始化，无法添加文档")
            return False

        try:
            texts = [doc.page_content for doc in documents]

            # Random UUID-based IDs so repeated calls never collide with
            # (and silently overwrite) previously inserted documents.
            ids = [f"doc_{uuid.uuid4().hex}" for _ in texts]

            # Merge each document's own metadata with caller-supplied extras.
            metadatas = []
            for i, doc in enumerate(documents):
                meta = doc.metadata.copy()
                if metadata_list and i < len(metadata_list):
                    meta.update(metadata_list[i])
                metadatas.append(meta)

            embeddings = self.generate_embeddings(texts)

            if embeddings is not None:
                collection.add(
                    documents=texts,
                    embeddings=embeddings,
                    metadatas=metadatas,
                    ids=ids
                )
            else:
                # No embeddings produced: let ChromaDB embed the texts itself.
                collection.add(
                    documents=texts,
                    metadatas=metadatas,
                    ids=ids
                )

            print(f"成功添加 {len(texts)} 个文档到集合")
            return True
        except Exception as e:
            print(f"添加文档到集合时出错: {type(e).__name__}: {str(e)}")
            self._log_error_details(e)
            return False

    def search_documents(self, collection, query, n_results=5):
        """Search a collection for documents similar to the query.

        Args:
            collection: ChromaDB collection to search.
            query: Query text.
            n_results: Maximum number of hits to return.

        Returns:
            A list of dicts with 'content', 'metadata' and 'distance' keys.
        """
        if not collection:
            print("错误: 集合未初始化，无法搜索文档")
            return []

        try:
            # Prefer an externally generated query embedding when available.
            query_embedding = None
            if self.use_external_embedding and self.embedding_client:
                try:
                    query_embeddings = self.generate_embeddings([query])
                    # generate_embeddings may return None (ChromaDB fallback);
                    # only index into a real result.
                    if query_embeddings is not None:
                        query_embedding = query_embeddings[0]
                except Exception as e:
                    print(f"生成查询嵌入时出错，使用ChromaDB内置嵌入: {e}")

            if query_embedding is not None:
                results = collection.query(
                    query_embeddings=[query_embedding],
                    n_results=n_results,
                    include=["documents", "metadatas", "distances"]
                )
            else:
                # Fall back to ChromaDB's built-in query-text embedding.
                results = collection.query(
                    query_texts=[query],
                    n_results=n_results,
                    include=["documents", "metadatas", "distances"]
                )

            # Flatten ChromaDB's nested result structure into simple dicts.
            formatted_results = []
            if results and "documents" in results and results["documents"] and len(results["documents"]) > 0:
                for i in range(len(results["documents"][0])):
                    formatted_results.append({
                        "content": results["documents"][0][i],
                        "metadata": results["metadatas"][0][i] if results["metadatas"] and len(results["metadatas"]) > 0 and i < len(results["metadatas"][0]) else {},
                        "distance": results["distances"][0][i] if results["distances"] and len(results["distances"]) > 0 and i < len(results["distances"][0]) else 0
                    })

            print(f"搜索完成，找到 {len(formatted_results)} 个相似文档")
            return formatted_results
        except Exception as e:
            print(f"搜索文档时出错: {type(e).__name__}: {str(e)}")
            self._log_error_details(e)
            return []

    def get_all_collections(self):
        """Return the names of all collections (empty list on error)."""
        try:
            collections = self.client.list_collections()
            # chromadb >= 0.6 returns plain name strings; older versions
            # return objects with a .name attribute — support both.
            return [col if isinstance(col, str) else col.name for col in collections]
        except Exception as e:
            print(f"获取所有集合时出错: {type(e).__name__}: {str(e)}")
            return []

    def get_collections(self):
        """Alias of get_all_collections (kept for frontend API compatibility)."""
        return self.get_all_collections()

    def add_documents(self, collection_name, documents, metadata_list=None):
        """Add documents to a named collection (API-compatibility wrapper).

        Returns:
            The number of documents added on success, or a dict with
            'status'/'message' keys on failure (shape kept for callers).
        """
        try:
            collection = self.create_collection(collection_name)
            if not collection:
                print(f"创建集合 '{collection_name}' 失败，无法添加文档")
                return {
                    "status": "error",
                    "message": f"创建集合 '{collection_name}' 失败"
                }

            result = self.add_documents_to_collection(collection, documents, metadata_list)

            if result:
                # Success: callers expect the count of added documents.
                return len(documents)
            else:
                return {
                    "status": "error",
                    "message": "添加文档失败"
                }
        except Exception as e:
            print(f"添加文档时出错: {type(e).__name__}: {str(e)}")
            return {
                "status": "error",
                "message": str(e)
            }

    def search(self, collection_name, query, top_k=5):
        """Search a named collection (API-compatibility wrapper).

        Returns:
            A list of dicts with 'content', 'metadata' and 'score' keys,
            where score = 1 - distance.
        """
        try:
            collection = self.client.get_collection(name=collection_name)
            if not collection:
                print(f"集合 '{collection_name}' 不存在，无法搜索")
                return []

            results = self.search_documents(collection, query, top_k)

            if results:
                # Convert distance to a similarity score for callers.
                formatted_results = []
                for result in results:
                    formatted_results.append({
                        "content": result["content"],
                        "metadata": result["metadata"],
                        "score": 1 - result["distance"] if "distance" in result else 0
                    })
                return formatted_results
            else:
                return []
        except Exception as e:
            print(f"搜索时出错: {type(e).__name__}: {str(e)}")
            return []

    def delete_collection(self, collection_name):
        """Delete a collection. Returns True on success, False otherwise."""
        try:
            self.client.delete_collection(name=collection_name)
            print(f"成功删除集合: {collection_name}")
            return True
        except Exception as e:
            print(f"删除集合时出错: {type(e).__name__}: {str(e)}")
            return False

    def get_all_documents_in_collection(self, collection_name):
        """Return every document in a collection as id/content/metadata dicts."""
        try:
            collection = self.client.get_collection(name=collection_name)
            if not collection:
                print(f"集合 '{collection_name}' 不存在，无法获取文档")
                return []

            # collection.get() with no filter returns the whole collection.
            results = collection.get()

            documents = []
            if results and "ids" in results and "documents" in results and "metadatas" in results:
                for i, doc_id in enumerate(results["ids"]):
                    documents.append({
                        "id": doc_id,
                        "content": results["documents"][i] if i < len(results["documents"]) else "",
                        "metadata": results["metadatas"][i] if i < len(results["metadatas"]) else {}
                    })

            print(f"成功获取集合 '{collection_name}' 中的 {len(documents)} 个文档")
            return documents
        except Exception as e:
            print(f"获取集合中文档时出错: {type(e).__name__}: {str(e)}")
            return []

    def delete_document_from_collection(self, collection_name, document_id):
        """Delete one document by id. Returns True on success, False otherwise."""
        try:
            collection = self.client.get_collection(name=collection_name)
            if not collection:
                print(f"集合 '{collection_name}' 不存在，无法删除文档")
                return False

            collection.delete(ids=[document_id])
            print(f"成功从集合 '{collection_name}' 中删除文档 '{document_id}'")
            return True
        except Exception as e:
            print(f"删除文档时出错: {type(e).__name__}: {str(e)}")
            return False

# Lightweight smoke test (runs only when this file is executed directly).
if __name__ == "__main__":
    print("开始测试向量存储管理器...")
    manager = VectorStoreManager()
    print(f"测试完成，嵌入客户端状态: {'已初始化' if manager.embedding_client else '未初始化'}")
    print(f"使用外部嵌入: {manager.use_external_embedding}")

    # Exercise collection creation.
    test_collection = manager.create_collection("test_collection")
    if test_collection:
        print(f"成功创建测试集合")

        # Build a couple of in-memory Document objects to insert.
        from langchain_core.documents import Document
        raw_docs = [
            {"page_content": "这是测试文档1", "metadata": {"source": "test1.txt"}},
            {"page_content": "这是测试文档2", "metadata": {"source": "test2.txt"}}
        ]
        docs = [
            Document(page_content=item["page_content"], metadata=item["metadata"])
            for item in raw_docs
        ]

        added = manager.add_documents_to_collection(test_collection, docs)
        print(f"添加测试文档: {'成功' if added else '失败'}")

        # Exercise similarity search on the freshly added documents.
        if added:
            hits = manager.search_documents(test_collection, "测试文档")
            print(f"搜索结果数量: {len(hits)}")
            for idx, hit in enumerate(hits):
                print(f"结果 {idx+1}: {hit['content'][:50]}...")

        # Remove the throwaway collection.
        manager.delete_collection("test_collection")