import os
import re
import shutil
from uuid import uuid4
# Text embedding / document loading imports
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders import Docx2txtLoader
from langchain_community.document_loaders.csv_loader import CSVLoader
#from langchain_community.embeddings import CohereEmbeddings
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import CharacterTextSplitter,MarkdownTextSplitter,RecursiveCharacterTextSplitter
from langchain.schema import Document
from pathlib import Path
from langchain_community.document_loaders.text import TextLoader
from parser_ocr import llama_parse
from qdrant_client import QdrantClient
from langchain_qdrant import QdrantVectorStore
import threading
from typing import Dict
from qdrant_client.http.models import Distance, VectorParams
from langchain_community.document_loaders import UnstructuredPowerPointLoader
#from langchain_community.embeddings import OllamaEmbeddings
# Helpers for splitting documents (by heading level, separator, or Markdown)
# and storing them in a local Qdrant vector store.
# Global client cache (one embedded Qdrant client per storage path)
# Cache of embedded Qdrant clients keyed by absolute storage path, so each
# on-disk store is opened at most once per process.
_qdrant_clients: Dict[str, QdrantClient] = {}
# Serializes creation/teardown of cached clients across threads.
_client_lock = threading.Lock()
def get_local_qdrant_client(vector_db_path):
    """Return a cached local (embedded) Qdrant client for ``vector_db_path``.

    Clients are cached per resolved absolute path so the same on-disk store
    is never opened twice. On a connection-style failure the lock files are
    cleaned up and creation is retried once.

    Returns:
        QdrantClient, or None if the client could not be created.
    """
    with _client_lock:
        abs_path = str(Path(vector_db_path).resolve())

        # Fast path: reuse an already-open client for this storage path.
        if abs_path in _qdrant_clients:
            return _qdrant_clients[abs_path]

        # Bind before the try so the except/cleanup path can always use it.
        storage_path = Path(vector_db_path)
        try:
            storage_path.mkdir(parents=True, exist_ok=True)

            print(f"🔍 创建本地 Qdrant 客户端: {storage_path}")

            # Force embedded/local storage; explicitly null out every remote
            # connection parameter so no server is contacted.
            client = QdrantClient(
                path=str(storage_path),
                url=None,
                host=None,
                port=None,
                prefer_grpc=False
            )

            # Smoke-test the local store before caching the client.
            collections = client.get_collections()
            print(f"✅ 本地 Qdrant 连接成功: {[col.name for col in collections.collections]}")

            _qdrant_clients[abs_path] = client
            return client

        except Exception as e:
            print(f"❌ 本地 Qdrant 创建失败: {e}")

            # "10061" (WinError: connection refused) or a "连接" message
            # suggests a stale lock from a crashed process — clean and retry.
            if "10061" in str(e) or "连接" in str(e):
                print("🔄 检测到连接问题，尝试清理...")
                try:
                    # Remove any leftover lock files.
                    for lock_file in storage_path.rglob("*.lock"):
                        try:
                            lock_file.unlink()
                            print(f"🗑️ 删除锁文件: {lock_file}")
                        except OSError:
                            # Best-effort: skip files we cannot delete.
                            pass

                    # Retry creation once after cleanup.
                    client = QdrantClient(path=str(storage_path))
                    _qdrant_clients[abs_path] = client
                    print("✅ 清理后重试成功")
                    return client

                except Exception as retry_e:
                    print(f"❌ 重试也失败: {retry_e}")

            return None
def close_all_qdrant_clients():
    """Close every cached Qdrant client and clear the cache.

    Shutdown is best-effort: a failure to close one client is logged and the
    remaining clients are still closed.
    """
    with _client_lock:
        for path, client in _qdrant_clients.items():
            try:
                client.close()
                print(f"✅ 已关闭客户端: {path}")
            except Exception as e:
                # Previously a bare `except: pass` hid close failures entirely.
                print(f"❌ 关闭客户端失败 {path}: {e}")
        _qdrant_clients.clear()
async def add_vector_db(vector_db_path, collection_name, embeddings, documents):
    """Add ``documents`` to ``collection_name``, creating the collection if needed.

    Args:
        vector_db_path: local Qdrant storage directory.
        collection_name: target collection.
        embeddings: embedding model used by the vector store.
        documents: LangChain ``Document`` objects to insert.

    Returns:
        The list of generated point UUIDs, or None if no client is available.
    """
    uuids = [str(uuid4()) for _ in range(len(documents))]
    # Reuse the cached client for this storage path.
    client = get_local_qdrant_client(vector_db_path)
    if client is None:
        print(f"❌ 无法获取 Qdrant 客户端")
        return None

    # Check collection existence instead of loading the whole vector store.
    try:
        collections = client.get_collections()
        collection_names = [col.name for col in collections.collections]
        collection_exists = collection_name in collection_names
    except Exception as e:
        print(f"❌ 检查集合时出错: {e}")
        collection_exists = False

    if collection_exists:
        print(f"✅ 向量库已存在，添加文档: {collection_name}")
    else:
        print(f"🆕 创建新的向量库: {collection_name}")
        # NOTE(review): vector size 1024 is hard-coded — it must match the
        # embedding model's output dimension; confirm against `embeddings`.
        client.create_collection(
            collection_name=collection_name,
            vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
        )

    # Both branches previously duplicated this store+insert code verbatim.
    vectorstore = QdrantVectorStore(
        client=client,
        collection_name=collection_name,
        embedding=embeddings
    )
    await vectorstore.aadd_documents(documents, ids=uuids)
    if collection_exists:
        print(f"✅ 已添加 {len(documents)} 个文档到现有向量库")
    else:
        print(f"✅ 成功创建新向量库，包含 {len(documents)} 个文档")
    return uuids

def create_vector_store_collection(vector_db_path, collection_name):
    """Create an empty collection in the local Qdrant store.

    Returns:
        True on success, None on failure. (Fix: the original returned None
        on every path, so callers could not detect success.)
    """
    try:
        # The storage directory must already exist.
        if not Path(vector_db_path).exists():
            print(f"❌ 存储目录不存在: {vector_db_path}")
            return None
        client = get_local_qdrant_client(vector_db_path)
        if client is None:
            print(f"❌ 无法获取 Qdrant 客户端")
            return None
        print(f"🔍 正在创建向量库: {collection_name} ...")
        # NOTE(review): vector size 1024 must match the embedding dimension.
        client.create_collection(
            collection_name=collection_name,
            vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
        )
        return True
    except Exception as e:
        print(f"❌ 向量库创建失败: {e}")
        return None

# Load an existing vector store
def load_vector_store(vector_db_path, collection_name, embeddings):
    """Load an existing vector store from the local Qdrant directory.

    Returns:
        A ``QdrantVectorStore`` bound to ``collection_name``, or None when the
        storage directory is missing or the client/store cannot be opened.
    """
    try:
        # The storage directory must already exist.
        if not Path(vector_db_path).exists():
            print(f"❌ 存储目录不存在: {vector_db_path}")
            return None
        client = get_local_qdrant_client(vector_db_path)
        if client is None:
            # Fix: a None client used to be passed straight to QdrantVectorStore.
            print(f"❌ 向量库加载失败: 无法获取 Qdrant 客户端")
            return None
        print(f"🔍 正在加载向量库: {collection_name} ...")
        vectorstore = QdrantVectorStore(
            client=client,
            collection_name=collection_name,
            embedding=embeddings
        )
        print(f"✅ 向量库加载成功: {collection_name}")
        return vectorstore
    except Exception as e:
        print(f"❌ 向量库加载失败: {e}")
        return None
async def CreateDatabase_title_split(vector_db_path, collection_name, embeddings, filename, savepath, filetype, titleSeparator, ifOCR):
    """Create a vector DB collection by splitting a document on numbered headings.

    ``titleSeparator`` selects the heading depth: 'h2' -> "1.2",
    'h4' -> "1.2.3.4", anything else -> "1.2.3" (e.g. bid-document clauses).

    Returns:
        The list of point UUIDs added to the store, or None on failure.
    """
    try:
        pages = await file_loader(filename, filetype, ifOCR)
        # Flatten the loaded pages into one text body.
        if isinstance(pages, list):
            document_text = "\n".join(page.page_content for page in pages)
        else:
            document_text = pages

        # Pick the heading-number pattern for the requested level, e.g. 3.1.2.
        pattern = re.compile(r'\d+\.\d+\.\d+')
        if titleSeparator == 'h4':
            pattern = re.compile(r'\d+\.\d+\.\d+\.\d+')
        elif titleSeparator == 'h2':
            pattern = re.compile(r'\d+\.\d+')

        # Cut the text at each NEW heading number; repeated occurrences of the
        # same number stay within one section.
        matches = list(pattern.finditer(document_text))
        sections = []
        current_start = 0
        current_number = None
        for match in matches:
            number = match.group(0)
            start = match.start()
            if current_number is None:
                current_number = number
            elif current_number != number:
                sections.append(document_text[current_start:start].strip())
                current_start = start
                current_number = number
        # BUGFIX: the tail-section append and everything below used to sit
        # INSIDE the loop above — duplicating sections, rewriting chunk.txt on
        # every match, and leaving `documents` unbound when no heading matched.
        sections.append(document_text[current_start:].strip())

        # Further split oversized sections into <=2000-char chunks.
        text_splitter = CharacterTextSplitter(chunk_size=2000, chunk_overlap=0)
        print('使用标题分割:'+titleSeparator)
        split_documents = []
        for section in sections:
            split_documents.extend(text_splitter.split_text(section))
        documents = [Document(page_content=doc, metadata={"source": filename}) for doc in split_documents]

        # Persist the chunks to chunk.txt for inspection.
        with open(savepath+'/chunk.txt', 'w', encoding='utf-8') as f:
            for doc in documents:
                f.write(doc.page_content + '\n'+'*******************chunk*******************'+'\n')

        uuids = await add_vector_db(vector_db_path, collection_name, embeddings, documents)
        return uuids
    except Exception as e:
        print(f"❌ 创建标题分割向量数据库失败: {e}")
        return None

    
# Split on specific separator characters and return the vector store IDs
async def CreateDatabase_character_split(vector_db_path, collection_name, embeddings, filename, savepath, filetype, separatorString, chunk_size, chunk_overlap, ifOCR):
    """Create a vector DB collection by splitting on user-supplied separators.

    Args:
        separatorString: comma-separated list of separators, tried in order
            by the recursive splitter.
        chunk_size / chunk_overlap: forwarded to the splitter.

    Returns:
        The list of point UUIDs added to the store, or None on failure.
    """
    try:
        pages = await file_loader(filename, filetype, ifOCR)
        # Fixed-separator splitting.
        separatorList = separatorString.split(',')
        text_splitter = RecursiveCharacterTextSplitter(separators=separatorList, chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        documents = text_splitter.split_documents(pages)
        print('使用字符分:'+separatorString)
        # Persist the chunks to chunk.txt for inspection.
        with open(savepath+'/chunk.txt', 'w', encoding='utf-8') as f:
            for doc in documents:
                f.write(doc.page_content + '\n'+'*******************chunk*******************'+'\n')
        uuids = await add_vector_db(vector_db_path, collection_name, embeddings, documents)
        return uuids
    except Exception as e:
        print(f"❌ 创建字符分割向量数据库失败: {e}")
        # An embedding-dimension mismatch poisons the cached client; reset it.
        # (Fix: the original tested the identical substring twice with `or`.)
        if "operands could not be broadcast together with shapes" in str(e):
            close_all_qdrant_clients()
            get_local_qdrant_client(vector_db_path)  # recreate a fresh client
        return None
# Split in Markdown format and return the vector store IDs
async def CreateDatabase_markdown_split(vector_db_path, collection_name, embeddings, filename, savepath, file_type, chunk_size, chunk_overlap, ifOCR):
    """Create a vector DB collection by splitting the file as Markdown.

    Loads the file, splits it with ``MarkdownTextSplitter``, writes the
    resulting chunks to ``savepath``/chunk.txt for inspection, and inserts
    them into the collection. Returns the point UUIDs, or None on failure.
    """
    try:
        loaded_pages = await file_loader(filename, file_type, ifOCR)
        # Markdown-aware splitting.
        splitter = MarkdownTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        print('使用markdown分词！')
        documents = splitter.split_documents(loaded_pages)
        # Dump each chunk, delimited by a marker line, for manual review.
        delimiter = '\n' + '*******************chunk*******************' + '\n'
        with open(savepath + '/chunk.txt', 'w', encoding='utf-8') as chunk_file:
            for document in documents:
                chunk_file.write(document.page_content + delimiter)
        return await add_vector_db(vector_db_path, collection_name, embeddings, documents)
    except Exception as e:
        print(f"❌ 创建markdown向量数据库失败: {e}")
        return None
def delete_vector_database(vector_db_path, collection_name):
    """Delete a collection from the local Qdrant store.

    Returns:
        True if the collection was deleted, False otherwise.
    """
    try:
        client = get_local_qdrant_client(vector_db_path)
        if client is None:
            # Fix: previously a None client raised AttributeError that was
            # silently absorbed by the broad except below.
            print(f"❌ 向量库删除失败: 无法获取 Qdrant 客户端")
            return False
        client.delete_collection(collection_name)
        print(f"✅ 向量库删除成功: {collection_name}")
        return True
    except Exception as e:
        print(f"❌ 向量库删除失败: {e}")
        return False
def delete_vector_db_by_ids(vector_db_path: str, collection_name: str, ids: list[str], embeddings):
    """Delete the documents with the given IDs from a collection.

    Returns:
        True when the delete call reports success, False otherwise
        (including when an exception is raised).
    """
    try:
        qdrant_client = get_local_qdrant_client(vector_db_path)
        store = QdrantVectorStore(
            client=qdrant_client,
            collection_name=collection_name,
            embedding=embeddings
        )
        # delete() reports whether the operation was accepted.
        if store.delete(ids=ids):
            print(f"✅ 成功删除 {len(ids)} 个文档,ids: {ids}")
            return True
        print(f"❌ 删除文档失败，可能是 ID 不存在,ids: {ids}")
        return False
    except Exception as e:
        print(f"❌ 删除文档失败: {e}")
        return False

def list_collections(vector_db_path):
    """Return the names of all collections in the local store ([] on error)."""
    try:
        qdrant_client = get_local_qdrant_client(vector_db_path)
        names = [collection.name for collection in qdrant_client.get_collections().collections]
        print(f"现有集合: {names}")
        return names
    except Exception as e:
        print(f"❌ 获取集合列表失败: {e}")
        return []
# Load a file and return its parsed pages
async def file_loader(filename, filetype, ifOCR):
    """Load ``filename`` with the loader matching its MIME ``filetype``.

    Args:
        filename: path to the file on disk.
        filetype: MIME type string (pdf / plain text / docx / csv / pptx).
        ifOCR: True or the string "true" to parse via OCR instead.

    Returns:
        The list of parsed page documents.

    Raises:
        ValueError: if ``filetype`` is not one of the supported MIME types.
    """
    if ifOCR == True or ifOCR == "true":
        # OCR-based parsing path.
        print('使用ocr进行解析！')
        pages = await llama_parse(filename)
        return pages

    # Fix: the original used independent `if`s with no fallback, so an
    # unknown filetype crashed with NameError on an unbound `loader`.
    if filetype == 'application/pdf':
        loader = PyPDFLoader(filename)
    elif filetype == 'text/plain':
        # Normalize Windows path separators for TextLoader.
        filename = filename.replace('\\', '/')
        loader = TextLoader(filename, encoding='utf-8')
    elif filetype == 'application/vnd.openxmlformats-officedocument.wordprocessingml.document':
        loader = Docx2txtLoader(filename)
    elif filetype == 'text/csv':
        loader = CSVLoader(file_path=filename, encoding='utf-8')
    elif filetype == 'application/vnd.openxmlformats-officedocument.presentationml.presentation':
        loader = UnstructuredPowerPointLoader(filename)
    else:
        raise ValueError(f"Unsupported file type: {filetype}")
    pages = loader.load()
    return pages

