from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
import sys

import time
import hashlib
from tqdm import tqdm

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config import OLLAMA_BASE_URL, EMBEDDING_MODEL, CHUNK_SIZE, CHUNK_OVERLAP

def create_embeddings_model():
    """Build and return an Ollama embeddings model.

    Prefers the maintained ``langchain_ollama`` package and falls back to
    the deprecated community implementation when it is not installed.

    Returns:
        OllamaEmbeddings: Embeddings client bound to OLLAMA_BASE_URL / EMBEDDING_MODEL.
    """
    try:
        # Prefer the new dedicated package when available.
        from langchain_ollama import OllamaEmbeddings
        print("使用 langchain_ollama 包导入 OllamaEmbeddings")
    except ImportError:
        # Legacy fallback: deprecated community import.
        from langchain_community.embeddings import OllamaEmbeddings
        print("注意: 使用已弃用的 OllamaEmbeddings，建议安装 langchain_ollama")

    print(f"创建嵌入模型: {EMBEDDING_MODEL}, 服务地址: {OLLAMA_BASE_URL}")
    return OllamaEmbeddings(base_url=OLLAMA_BASE_URL, model=EMBEDDING_MODEL)


def create_vector_store(documents, embeddings=None):
    """Split *documents* into chunks and index them in a Chroma vector store.

    Args:
        documents: Documents to index.
        embeddings: Embedding model; a new one is created when None.

    Returns:
        Chroma: In-memory vector store over the document chunks.
    """
    embeddings = embeddings if embeddings is not None else create_embeddings_model()

    splitter = RecursiveCharacterTextSplitter(
        chunk_size=CHUNK_SIZE,
        chunk_overlap=CHUNK_OVERLAP,
    )
    return Chroma.from_documents(splitter.split_documents(documents), embeddings)

def setup_rag(project_path, force_reindex=False):
    """
    Set up the RAG system by indexing the entire project's code.

    Scans *project_path* for text files, hashes them to detect changes since
    the last run (cached in ``.rag_cache``), then (re)builds a persisted
    Chroma vector store under ``vector_store``.

    Parameters:
        project_path (str): Project root directory.
        force_reindex (bool): Force regeneration of all embeddings. Defaults to False.

    Returns:
        Chroma | None: The vector store, or None when no document content
        could be loaded.
    """
    # File extensions treated as indexable text.
    text_extensions = [
        ".py", ".cpp", ".c", ".h", ".hpp", ".java", ".js", ".ts",
        ".html", ".css", ".md", ".txt", ".json", ".xml", ".yaml",
        ".yml", ".sh", ".bat", ".cmake", ".rst", ".go", ".php", ".cc"
    ]

    # Binary file extensions and directories that must be skipped.
    binary_extensions = [
        ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".ico", ".svg",
        ".pdf", ".doc", ".docx", ".ppt", ".pptx", ".xls", ".xlsx",
        ".zip", ".gz", ".tar", ".rar", ".exe", ".dll", ".so", ".o",
        ".bin", ".dat", ".db", ".sqlite", ".pyc", ".class", ".a",
        ".make", ".marks", ".includecache", ".internal", ".mjs"
    ]

    excluded_dirs = [
        "build", ".git", "__pycache__", "node_modules", "dist",
        "venv", "env", ".vscode", ".idea", "CMakeFiles"
    ]

    # Cache / persistence locations inside the project.
    cache_file = os.path.join(project_path, ".rag_cache")
    vector_store_path = os.path.join(project_path, "vector_store")
    vector_store_ready_flag = os.path.join(project_path, ".vector_store_ready")

    # Maps file path -> md5 hex digest recorded on the previous run.
    file_cache = {}

    # Load the cache: one "path hash" pair per line.
    if not force_reindex and os.path.exists(cache_file):
        try:
            with open(cache_file, "r") as f:
                for line in f:
                    # BUG FIX: rsplit instead of split, so paths that contain
                    # spaces still parse correctly — the hash is always the
                    # last space-separated token on the line.
                    file_path, file_hash = line.strip().rsplit(" ", 1)
                    file_cache[file_path] = file_hash
            print("缓存文件已成功加载")
        except Exception as e:
            print(f"加载缓存文件时出错: {e}")
            file_cache = {}  # Reset on failure so every file is re-hashed.
    else:
        print("缓存文件不存在或强制重新索引，将重新生成...")

    # Pre-collect all candidate files and the subset that changed.
    print("正在扫描项目文件...")
    valid_files = []
    updated_files = []

    for root, dirs, files in os.walk(project_path):
        # Prune excluded directories in place so os.walk skips them.
        # NOTE(review): substring match also prunes dirs that merely contain
        # an excluded name (e.g. "my_env" matches "env") — confirm intended.
        dirs[:] = [d for d in dirs if not any(excluded in d for excluded in excluded_dirs)]

        for file in files:
            file_path = os.path.join(root, file)

            # Skip known binary formats.
            if any(file.endswith(ext) for ext in binary_extensions):
                continue

            # Only consider recognised text files.
            if any(file.endswith(ext) for ext in text_extensions):
                if os.path.isfile(file_path):
                    valid_files.append(file_path)

                    # Re-embed only files whose content hash changed.
                    current_hash = get_file_hash(file_path)
                    if file_path not in file_cache or file_cache[file_path] != current_hash:
                        updated_files.append(file_path)
                        file_cache[file_path] = current_hash

    print(f"找到 {len(valid_files)} 个有效文本文件，其中 {len(updated_files)} 个需要更新")

    # Nothing changed: reuse the persisted store when it exists.
    if not updated_files and not force_reindex:
        print("无需重新索引，加载现有向量存储...")
        if os.path.exists(vector_store_path):
            return load_vector_store(project_path)
        else:
            print("警告: 向量存储文件夹不存在，将强制重新生成...")
            force_reindex = True

    # Full rebuild: embed every valid file, not just the changed ones.
    if force_reindex or not os.path.exists(vector_store_path):
        print("重新生成向量存储...")
        updated_files = valid_files

    # Load the selected files' contents with TextLoader.
    from langchain_community.document_loaders import TextLoader

    print("正在加载文件内容...")
    documents = []
    for file_path in tqdm(updated_files, desc="加载文件"):
        try:
            # autodetect_encoding avoids crashes on non-UTF-8 sources.
            loader = TextLoader(file_path, autodetect_encoding=True)
            documents.extend(loader.load())
        except Exception as e:
            print(f"加载文件 {file_path} 时出错: {e}")

    print(f"成功加载 {len(documents)} 个文件内容")

    # Bail out when there is nothing to embed.
    if not documents:
        print("错误: 没有可用的文档内容，无法生成向量存储！")
        return None

    try:
        # Prefer the maintained langchain_ollama package.
        try:
            from langchain_ollama import OllamaEmbeddings
            print("使用 langchain_ollama 包导入 OllamaEmbeddings")
        except ImportError:
            from langchain_community.embeddings import OllamaEmbeddings
            print("注意: 使用已弃用的 OllamaEmbeddings，建议安装 langchain_ollama")

        from config import OLLAMA_BASE_URL, EMBEDDING_MODEL
        print(f"创建嵌入模型: {EMBEDDING_MODEL}, 服务地址: {OLLAMA_BASE_URL}")
        embeddings = OllamaEmbeddings(
            base_url=OLLAMA_BASE_URL,
            model=EMBEDDING_MODEL
        )

        print("\n开始生成嵌入向量 (这可能需要较长时间)...")

        # Process in batches so a single huge request never hits Ollama.
        batch_size = 50
        total_batches = (len(documents) + batch_size - 1) // batch_size

        from langchain.text_splitter import RecursiveCharacterTextSplitter
        from langchain_community.vectorstores import Chroma
        from config import CHUNK_SIZE, CHUNK_OVERLAP

        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=CHUNK_SIZE,
            chunk_overlap=CHUNK_OVERLAP
        )

        # Split with per-batch progress reporting.
        all_chunks = []
        for i in tqdm(range(0, len(documents), batch_size), desc="分割文本", total=total_batches):
            batch_docs = documents[i:i+batch_size]
            all_chunks.extend(text_splitter.split_documents(batch_docs))

        print(f"文本分割完成，共 {len(all_chunks)} 个文本块")

        # Make sure the persistence directory exists before writing.
        os.makedirs(vector_store_path, exist_ok=True)

        # Embed batch by batch: the first batch creates the persisted store,
        # subsequent batches are appended to it.
        print("\n创建向量存储 (这是最耗时的步骤，请耐心等待)...")
        vector_store = None
        for i in tqdm(range(0, len(all_chunks), batch_size), desc="生成嵌入向量", total=(len(all_chunks) + batch_size - 1) // batch_size):
            batch = all_chunks[i:i+batch_size]

            if vector_store is None:
                vector_store = Chroma.from_documents(batch, embeddings, persist_directory=vector_store_path)
            else:
                vector_store.add_documents(batch)

        # Persist the hash cache, then mark the store as ready.
        with open(cache_file, "w") as f:
            for file_path, file_hash in file_cache.items():
                f.write(f"{file_path} {file_hash}\n")

        with open(vector_store_ready_flag, "w") as f:
            f.write("ready")

        print("✓ 向量存储创建完成")
        return vector_store
    except Exception as e:
        print(f"建立RAG索引时发生错误: {e}")
        import traceback
        traceback.print_exc()
        raise




def get_file_hash(file_path):
    """Return the MD5 hex digest of the file at *file_path*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(file_path, "rb") as fh:
        # iter() with a sentinel reads chunks until EOF yields b"".
        for chunk in iter(lambda: fh.read(4096), b""):
            digest.update(chunk)
    return digest.hexdigest()


def load_vector_store(project_path):
    """
    Load an existing persisted vector store.

    Parameters:
        project_path (str): Project root directory; the store is expected
            under its ``vector_store`` subdirectory.

    Returns:
        Chroma: The loaded vector store object.

    Raises:
        FileNotFoundError: If the vector_store directory does not exist.
    """
    from langchain_community.vectorstores import Chroma
    from config import OLLAMA_BASE_URL, EMBEDDING_MODEL

    store_dir = os.path.join(project_path, "vector_store")

    # Fail fast when the persistence directory is missing.
    if not os.path.exists(store_dir):
        raise FileNotFoundError(f"向量存储文件夹不存在: {store_dir}")

    try:
        # Prefer the maintained langchain_ollama package.
        try:
            from langchain_ollama import OllamaEmbeddings
        except ImportError:
            from langchain_community.embeddings import OllamaEmbeddings

        embeddings = OllamaEmbeddings(base_url=OLLAMA_BASE_URL, model=EMBEDDING_MODEL)

        print("正在加载现有向量存储...")
        store = Chroma(persist_directory=store_dir, embedding_function=embeddings)
        print("✓ 向量存储加载完成")
        return store
    except Exception as e:
        print(f"加载向量存储时发生错误: {e}")
        raise



def get_file_vector(file_path):
    """
    Compute the embedding vector of a single file's full text content.

    Args:
        file_path: Path of the file to embed.

    Returns:
        The embedding of the concatenated file content, or None when
        loading or embedding fails.
    """
    try:
        from langchain_community.document_loaders import TextLoader

        # autodetect_encoding avoids crashes on non-UTF-8 sources.
        loader = TextLoader(file_path, autodetect_encoding=True)
        documents = loader.load()

        # Consistency fix: build the embeddings via the shared factory so
        # this path, like the rest of the module, prefers langchain_ollama
        # and only falls back to the deprecated community import.
        embeddings = create_embeddings_model()

        # Join all loaded document chunks and embed them as one query.
        file_content = " ".join([doc.page_content for doc in documents])
        return embeddings.embed_query(file_content)
    except Exception as e:
        print(f"❌ 获取文件向量时出错: {str(e)}")
        import traceback
        traceback.print_exc()  # Print full stack trace for diagnosis.
        return None
def create_single_file_vector_store(file_path, embeddings=None):
    """
    Build a vector store holding the contents of one file.

    Args:
        file_path (str): Path of the file to index.
        embeddings: Embedding model; a new one is created when None.

    Returns:
        Chroma: Vector store over the file's chunks, or None on error.
    """
    from langchain_community.document_loaders import TextLoader
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain_community.vectorstores import Chroma
    from config import CHUNK_SIZE, CHUNK_OVERLAP

    # Create an embedding model when the caller did not supply one.
    if embeddings is None:
        embeddings = create_embeddings_model()

    try:
        # 1. Load the file (encoding auto-detected).
        documents = TextLoader(file_path, autodetect_encoding=True).load()
        print(f"已加载文件: {file_path}")

        # 2. Chunk it using the shared splitter settings.
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=CHUNK_SIZE,
            chunk_overlap=CHUNK_OVERLAP,
        )
        pieces = splitter.split_documents(documents)
        print(f"文本分割完成，共 {len(pieces)} 个文本块")

        # 3. Embed and index the chunks.
        store = Chroma.from_documents(pieces, embeddings)
        print("✓ 文件向量存储创建完成")
        return store

    except Exception as e:
        print(f"为文件创建向量存储时出错: {e}")
        import traceback
        traceback.print_exc()
        return None
