import os
from tqdm import tqdm
from dotenv import load_dotenv, find_dotenv
from langchain_study_deepseek import langchain_deepseek
from langchain_study_deepseek import get_OutputParser_from_langchain
import sys
from langchain_embedding_zhipu import embedding_langchain
import chromadb
from chromadb import PersistentClient
from langchain_chroma import Chroma
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
from datetime import datetime
from mixMMRSocre import HybridRetriever
from langchain.schema import messages_from_dict, messages_to_dict

# Load local/project environment variables.
# find_dotenv() locates the path of the .env file;
# load_dotenv() reads that file and loads its variables into the current
# process environment.
# If the variables are already set globally, this line has no effect.
_ = load_dotenv(find_dotenv())

# If you need to access the network through a proxy port, configure it like this:
# os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:7890'
# os.environ["HTTP_PROXY"] = 'http://127.0.0.1:7890'

# Collect the paths of all files under folder_path.
def get_file_paths(folder_path='data_base/knowledge_db'):
    """Recursively collect every file path under *folder_path*.

    Args:
        folder_path: Root directory to walk (default 'data_base/knowledge_db').

    Returns:
        list[str]: Full paths of all files found, in ``os.walk`` order.
    """
    # os.walk yields (root, dirs, files); join each file name back onto its
    # containing directory. (Dropped the original's redundant
    # `folder_path = folder_path` self-assignment.)
    return [
        os.path.join(root, file)
        for root, _dirs, files in os.walk(folder_path)
        for file in files
    ]

def init_vector_store_normal(api_key, embedd_model='Embedding-2'):
    """Build a Chroma vector store backed by a default persistent client.

    Seeds the "collection_name" collection with three placeholder documents,
    then wraps the client in a LangChain ``Chroma`` store using the Zhipu
    embedding model.

    Args:
        api_key: Zhipu API key used to create the embedding function.
        embedd_model: Embedding model name (default 'Embedding-2').

    Returns:
        Chroma: vector store bound to the "collection_name" collection.
    """
    client = chromadb.PersistentClient()
    client.get_or_create_collection("collection_name").add(
        ids=["1", "2", "3"], documents=["a", "b", "c"]
    )
    embedding_fn = embedding_langchain(api_key=api_key, embedd_model=embedd_model)
    return Chroma(
        client=client,
        collection_name="collection_name",
        embedding_function=embedding_fn,
    )

def init_vector_store(api_key, collection_name, persist_directory, embedd_model='Embedding-2'):
    """Create (or reopen) a locally persisted Chroma vector store.

    Args:
        api_key: Zhipu API key for the embedding function.
        collection_name: Name of the Chroma collection to use.
        persist_directory: Directory where Chroma persists data locally.
        embedd_model: Embedding model name (default 'Embedding-2').

    Returns:
        Chroma: the configured vector store.
    """
    embedding_fn = embedding_langchain(api_key=api_key, embedd_model=embedd_model)
    return Chroma(
        collection_name=collection_name,
        embedding_function=embedding_fn,
        persist_directory=persist_directory,  # remove to keep data in memory only
    )

# Items added to the store are of type Document.
def add_documents_to_vectore(file_paths, vector_store):
    """Load PDF/Markdown files, split them into chunks and add to the store.

    Args:
        file_paths: List of file paths; only '.pdf' and '.md' files are used,
            everything else is silently skipped.
        vector_store: LangChain vector store exposing ``add_documents``.
    """
    loaders = []
    for file_path in tqdm(file_paths):
        # Compare extensions case-insensitively so '.PDF'/'.MD' are not
        # skipped — keeps this consistent with add_documents_to_vector_metadata.
        file_type = file_path.split('.')[-1].lower()
        if file_type == 'pdf':
            loaders.append(PyPDFLoader(file_path))
        elif file_type == 'md':
            loaders.append(UnstructuredMarkdownLoader(file_path))

    # Each loader yields a list of Document objects.
    texts = []
    for loader in tqdm(loaders):
        texts.extend(loader.load())

    # Split into overlapping chunks sized for embedding.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    split_docs = text_splitter.split_documents(texts)

    # Insert in batches to keep embedding requests bounded.
    # (Removed the leftover debug prints from the original loop.)
    batch_size = 32
    for i in tqdm(range(0, len(split_docs), batch_size)):
        vector_store.add_documents(split_docs[i:i + batch_size])


def add_documents_to_vector_metadata(file_paths, vector_store, batch_size=32):
    """Load, split and store documents while attaching rich metadata.

    Enhanced ingestion: injects file-level metadata at load time, chunk-level
    metadata after splitting, then writes to the store in batches with a
    per-document retry on batch failure.

    Args:
        file_paths: List[str] - paths of the files to ingest (.pdf / .md).
        vector_store: vector store object exposing ``add_documents``.
        batch_size: int - number of chunks written per batch.

    Returns:
        None
    """
    # 1. Load documents and inject per-file metadata.
    all_docs = []
    for file_path in tqdm(file_paths, desc="加载文件中"):
        try:
            file_type = file_path.split('.')[-1].lower()
            loader = None

            if file_type == 'pdf':
                loader = PyPDFLoader(file_path)
            elif file_type == 'md':
                loader = UnstructuredMarkdownLoader(file_path)

            if loader:
                docs = loader.load()
                for doc in docs:
                    # Keep the loader-provided metadata (e.g. PDF page number)
                    # and layer our fields on top.
                    # NOTE(review): hash() is salted per interpreter run, so
                    # document_id is not reproducible across runs — switch to
                    # hashlib if stable IDs are ever required.
                    doc.metadata.update({
                        "source_file": file_path,
                        "file_type": file_type,
                        "ingest_time": datetime.now().isoformat(),
                        "document_id": f"doc_{hash(file_path)}_{datetime.now().timestamp()}"
                    })
                all_docs.extend(docs)
        except Exception as e:
            print(f"处理文件 {file_path} 时出错: {str(e)}")
            continue

    # 2. Split into chunks; add_start_index records each chunk's char offset.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=500,
        chunk_overlap=50,
        add_start_index=True
    )

    split_docs = text_splitter.split_documents(all_docs)
    total_chunks = len(split_docs)

    # 3. Enrich every chunk with chunk-level metadata.
    for i, chunk in enumerate(tqdm(split_docs, desc="处理分块元数据")):
        # Convert the loader's 0-based PDF page index to a 1-based label;
        # non-paginated sources (markdown) fall back to "p1".
        if 'page' in chunk.metadata:
            page_str = f"p{chunk.metadata['page'] + 1}"
        else:
            page_str = "p1"

        chunk.metadata.update({
            "chunk_id": f"chunk_{i}_{hash(chunk.page_content)}",
            "chunk_seq": f"{i+1}/{total_chunks}",
            "page_reference": page_str,
            "processing_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "text_length": len(chunk.page_content),
            "word_count": len(chunk.page_content.split())
        })

    # 4. Persist in batches; on batch failure, retry each document alone so a
    # single bad chunk does not discard the whole batch.
    for i in tqdm(range(0, total_chunks, batch_size), desc="存入向量库"):
        batch = split_docs[i:i + batch_size]
        try:
            vector_store.add_documents(batch)
            print(f"成功存入批次 {i//batch_size + 1}, 包含 {len(batch)} 个文档块")
        except Exception as e:
            print(f"批次 {i//batch_size + 1} 存入失败: {str(e)}")
            for doc in batch:
                try:
                    vector_store.add_documents([doc])
                except Exception:
                    # Was a bare `except:` — that also swallowed
                    # KeyboardInterrupt/SystemExit; narrowed to Exception.
                    print(f"文档块 {doc.metadata.get('chunk_id')} 持久化失败")

    print(f"处理完成！总共处理 {len(file_paths)} 个文件，生成 {total_chunks} 个文档块")


def find_close_document(retriever, input_text="What is LangChain?"):
    """Return the documents most similar to *input_text*.

    Args:
        retriever: Any retriever exposing ``invoke(query)``.
        input_text: Query string (default "What is LangChain?").

    Returns:
        Whatever ``retriever.invoke`` produces for the query.
    """
    return retriever.invoke(input_text)

def get_retriever():
    """Build the hybrid retriever over the persisted 'zhongyao_cln' store.

    Reads ZHIPUAI_API_KEY from the environment, opens the persisted Chroma
    collection and wraps it in a HybridRetriever.

    Returns:
        HybridRetriever: retriever combining a similarity threshold with MMR.
    """
    store = init_vector_store(
        os.environ['ZHIPUAI_API_KEY'],
        collection_name='zhongyao_cln',
        persist_directory='zhongyao_data/vector_db',
    )
    # threshold: similarity cutoff; mmr_k: docs returned;
    # lambda_mult: MMR relevance/diversity trade-off.
    return HybridRetriever(
        vector_store=store,
        threshold=0.25,
        mmr_k=3,
        lambda_mult=0.6,
    )

if __name__ == '__main__':
    # One-shot ingestion: open (or create) the persisted store, then populate
    # it only when the collection is still empty.
    zhipu_api_key = os.environ['ZHIPUAI_API_KEY']
    print('Now init vector store')
    vector_store = init_vector_store(zhipu_api_key, collection_name='zhongyao_cln_metadata', persist_directory='zhongyao_data/vector_db')
    print('vector_db has been init')

    print('Now add documents in vetore_db')
    collection_name = "zhongyao_cln_metadata"
    client = PersistentClient(path='zhongyao_data/vector_db')
    # get_or_create avoids raising on a brand-new persist directory where the
    # collection has not been materialised yet. (Also dropped the original's
    # redundant `collection_name = collection_name` self-assignment.)
    collection = client.get_or_create_collection(collection_name)
    if collection.count() > 0:
        print('No need add ')
    else:
        file_paths = get_file_paths(folder_path='zhongyao_data/knowledge_db')
        print('data is none, need add ')
        add_documents_to_vector_metadata(file_paths=file_paths, vector_store=vector_store)
