# -*- coding: utf-8 -*-
import ast
from datetime import datetime
from pathlib import Path
from dataclasses import dataclass, field
import pandas as pd
from langchain.document_loaders import TextLoader, Docx2txtLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
import os
from langchain.vectorstores import Chroma
from dataStorage import to_database
import logging

from config import cfg
from structural_vector import structual_text_v2
from FlagEmbedding import BGEM3FlagModel
from process_md import ProcessMd

# Global BGE-M3 embedding model, loaded eagerly at import time (expensive).
# NOTE(review): `model` is never referenced elsewhere in this module — confirm
# another importer relies on it before removing the load.
model = BGEM3FlagModel('models/Shitao/bge-m3', use_fp16=True)


@dataclass
class DocumentListItem():
    """One row of the local knowledge-base document list (persisted to CSV)."""
    file_name: str = ''  # document file name; used as the unique lookup key
    file_path: str = ''  # original source path ('mysql' for DB-sourced docs)
    file_category: str = ''  # category label; writers in this file never set it
    file_hash: str = ''  # content-hash placeholder; writers pass the literal 'hash'
    doc_ids: list = field(default_factory=list)  # Chroma ids of this file's chunks
    add_time: str = ''  # '%Y-%m-%d %H:%M:%S' timestamp set when the file is added


class DocumentListData():
    """本地知识库文档列表数据
    弥补向量数据库无法实现保存文档基础信息的功能

    Persists a small DataFrame of per-document metadata (see DocumentListItem)
    to a CSV file so the vector store's contents can be tracked by file name.
    """

    def __init__(self, filepath):
        """Load (or create) the CSV list stored at *filepath*."""
        self.filepath = filepath
        self.read()

    def read(self):
        """Read the CSV into self.dataframe, creating an empty file if absent.

        Tries UTF-8 first and falls back to GBK for legacy files written on
        Chinese-locale Windows. The former bare ``except:`` also swallowed
        KeyboardInterrupt/SystemExit; only decode errors are retried now.
        """
        if Path(self.filepath).exists():
            try:
                self.dataframe = pd.read_csv(
                    filepath_or_buffer=self.filepath,
                    index_col=0,
                    header=0,
                    encoding='utf-8')
            except UnicodeError:
                # Legacy GBK-encoded list file: retry with that codec.
                self.dataframe = pd.read_csv(
                    filepath_or_buffer=self.filepath,
                    index_col=0,
                    header=0,
                    encoding='GBK')
            print('共读取 %s 条数据' % len(self.dataframe.index))
        else:
            # Seed an empty list file with the DocumentListItem columns.
            columns = DocumentListItem().__dict__.keys()
            self.dataframe = pd.DataFrame(columns=columns)
            self.updateFile()
            print('路径不存在已新建')

    def add(self, _data: "DocumentListItem"):
        """增加一条数据 — append one record unless the file name already exists."""
        if self.search(_data.file_name):
            print('数据已存在插入失败')
            return
        # pd.concat replaces the private DataFrame._append API, which is
        # deprecated and removed in recent pandas releases.
        new_row = pd.DataFrame([_data.__dict__])
        self.dataframe = pd.concat([self.dataframe, new_row], ignore_index=True)
        self.updateFile()

    def delete_one(self, file_name):
        """删除一条数据 — drop the row(s) matching *file_name* and persist."""
        if not self.search(file_name):
            print('数据不存在')
        else:
            index_row = self.dataframe[self.dataframe.file_name == file_name].index
            self.dataframe.drop(index=index_row, inplace=True)
            # Persist through updateFile so every writer uses the same CSV
            # settings (previously this path wrote with pandas defaults,
            # producing a mixed-encoding file over time).
            self.updateFile()

    def search(self, file_name) -> bool:
        """根据文件名判断是否已存在 — True if *file_name* is already listed."""
        return bool(file_name in self.dataframe['file_name'].values)

    def find_ids(self, file_name):
        """Return the stored Chroma doc-id list for *file_name*, or False.

        doc_ids round-trips through the CSV as its Python repr string, so it
        is parsed back with ast.literal_eval.
        """
        if not self.search(file_name):
            return False
        matched = self.dataframe[self.dataframe.file_name == file_name]
        ids_repr = matched["doc_ids"].iloc[0]
        return ast.literal_eval(ids_repr)

    def search_part(self, line, cols):
        """Return the *cols* values of rows whose file_name starts with "2025".

        NOTE(review): *line* is accepted but never used, and the "2025"
        prefix is hard-coded; *cols* must be a single column name (a list
        would make `.tolist()` fail on a DataFrame) — confirm intent.
        """
        sub_dataframe = self.dataframe[self.dataframe.file_name.str.startswith("2025")]
        return sub_dataframe[cols].tolist()

    def updateFile(self):
        """文件修改后保存 — write the DataFrame back to self.filepath.

        Passing the path (rather than an open text handle) lets pandas honour
        the requested GBK encoding; with a handle the encoding argument was
        ignored and the file was silently written in the locale encoding.
        """
        self.dataframe.to_csv(
            path_or_buf=self.filepath,
            index=True,
            lineterminator='\n',
            encoding='gbk')


class DocumentVectorData():
    """本地知识库向量数据

    Couples a Chroma vector collection with a DocumentListData CSV so that
    per-file metadata (names, chunk ids, timestamps) is kept alongside the
    embeddings, which Chroma alone cannot store.
    """

    def __init__(self,
                 collection_name,
                 embedding_model_name,
                 data_dir) -> None:
        """
        Params
            collection_name: Chroma collection name; also keys the CSV list file.
            embedding_model_name: HuggingFace embedding model name or path.
            data_dir: directory (relative to cwd) holding the CSV list and the
                'vectordb' persistence directory. Created if missing.
        """
        base_dir = os.path.join(os.getcwd(), data_dir)
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)
        list_path = Path(base_dir) / f'filelist_{collection_name}.csv'
        vector_dir = Path(base_dir) / 'vectordb'

        self.documentListData = DocumentListData(list_path)
        self.embeddings = HuggingFaceEmbeddings(model_name=embedding_model_name)
        self.vectordb = Chroma(
            collection_name=collection_name,
            embedding_function=self.embeddings,
            persist_directory=str(vector_dir))

    def _register_file(self, file_name, file_path, docids):
        """Persist the vector store and record the file's metadata in the CSV.

        Shared tail of all add_* methods (previously duplicated four times).
        """
        self.vectordb.persist()
        print('√ 保存向量数据库')
        _file = DocumentListItem(
            file_name=file_name,
            file_path=file_path,
            file_hash='hash',  # placeholder — real content hashing not implemented
            doc_ids=docids,
            add_time=datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        )
        self.documentListData.add(_file)
        print('√ 文档信息保存至csv')

    def add_file(self, filepath):
        """增加新的文档 — load a .txt/.docx file, split it and store the chunks."""
        if not Path(filepath).exists():
            print('文件不存在已跳过')
            return
        file_name = Path(filepath).name
        print('开始处理: %s' % file_name)

        # Skip files that were already persisted.
        if self.documentListData.search(file_name):
            return

        # Pick a loader by extension; anything else is unsupported.
        suffix = Path(filepath).suffix
        if suffix == '.txt':
            loader = TextLoader(file_path=filepath, encoding='utf-8')
        elif suffix == '.docx':
            loader = Docx2txtLoader(filepath)
        else:
            print('目前无法处理此类型文件', suffix)
            return

        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000, chunk_overlap=100, length_function=len)
        doc_texts = loader.load_and_split(text_splitter=text_splitter)
        print('切分后文档', doc_texts)
        print('切分后文档的类型')
        docids = self.vectordb.add_documents(documents=doc_texts)
        self._register_file(file_name, filepath, docids)

    def add_file_v1(self, filepath, file_name):
        """Add a markdown file: split via ProcessMd and store under *file_name*."""
        if not Path(filepath).exists():
            print('文件不存在已跳过')
            return
        print('开始处理: %s' % file_name)

        # Skip files that were already persisted.
        if self.documentListData.search(file_name):
            return
        pm = ProcessMd(filepath)
        html_contents = pm.open_md()
        txt_list = ProcessMd.split_md(html_contents)
        txt_documents = ProcessMd.input_doc(txt_list, file_name)
        docids = self.vectordb.add_documents(documents=txt_documents)
        self._register_file(file_name, filepath, docids)

    def add_sql_file(self, filepath, file_name, txt_list):
        """Store pre-split text chunks (fetched from SQL) under *file_name*."""
        print('开始处理: %s' % file_name)

        # Skip files that were already persisted.
        if self.documentListData.search(file_name):
            return
        txt_documents = ProcessMd.input_doc(txt_list, file_name)
        docids = self.vectordb.add_documents(documents=txt_documents)
        self._register_file(file_name, filepath, docids)

    def update_file(self, filepath, file_name):
        """Re-split *filepath* and overwrite the stored vectors chunk by chunk.

        NOTE(review): chunks are paired positionally via zip(); if the new
        split yields a different chunk count, the extras on either side are
        silently dropped — confirm this is acceptable.
        """
        pm = ProcessMd(filepath)
        html_contents = pm.open_md()
        txt_list = ProcessMd.split_md(html_contents)
        txt_documents = ProcessMd.input_doc(txt_list, file_name)
        ids = self.documentListData.find_ids(file_name)
        if ids:
            print(len(txt_documents), len(ids))
            for doc_id, doc in zip(ids, txt_documents):
                try:
                    self.vectordb.update_documents(ids=[doc_id], documents=[doc])
                except Exception as e:
                    # Best effort: report and continue with remaining chunks.
                    print(e)

    def delete_file(self, file_name):
        """Delete all chunks of *file_name* from Chroma and drop its CSV row."""
        ids = self.documentListData.find_ids(file_name)
        if ids:
            self.vectordb.delete(ids=ids)
            # Re-query to show the ids are really gone.
            results = self.vectordb.get(ids=ids)
            print(f"删除后查询{file_name}的结果: {results}")
            self.documentListData.delete_one(file_name=file_name)

    def add_file_structural(self, file_name):
        """增加新的文档，文档预先进行了结构化处理

        Pulls pre-structured chunks from the database (structual_text_v2)
        instead of reading a local file; file_path is recorded as 'mysql'.
        """
        print("进入add_file_structural")
        doc_texts = structual_text_v2(article_name=file_name)
        docids = self.vectordb.add_documents(documents=doc_texts)
        self._register_file(file_name, 'mysql', docids)

    def similarity_search_score(self, question: str, k):
        """Return the top *k* (document, score) pairs for *question*."""
        return self.vectordb.similarity_search_with_score(question, k=k)

    def similarity_search_where(self, question: str, k: int):
        """Scored search restricted to one hard-coded source article.

        NOTE(review): the metadata filter below is hard-coded for debugging —
        confirm before relying on this outside manual tests.
        """
        where = {"org_article_name": "nan_刘爱力董事长在中国邮政集团有限公司2025年工作会议暨第一届第六次职工代表大会上的讲话"}
        vectordb_search = self.vectordb.similarity_search_with_score(question, k=k, filter=where)
        print(vectordb_search)
        return vectordb_search

    def similarity_search_score_filter_where(self, question: str, k, filterx):
        """Scored search with both a metadata filter and a content filter.

        Params
            question: query text.
            k: number of results to return.
            filterx: pair [metadata_filter, where_document_filter] forwarded
                to Chroma's `filter` and `where_document` arguments.
        """
        return self.vectordb.similarity_search_with_score(
            query=question, k=k, filter=filterx[0], where_document=filterx[1])

    def peak(self, ids):
        """Fetch raw stored records by id.

        Name kept for existing callers ('peek' was presumably intended).
        """
        return self.vectordb.get(ids=ids)




# 创建或加载向量数据库Company_Base
# 创建或加载向量数据库Company_Base
def vector_load(base_name: str):
    """Build a DocumentVectorData for the knowledge base *base_name*.

    Each known base reads its own embedding-model config section and stores
    data under `data_<base_name>`; anything else falls back to the 'Basic'
    section and its configured data directory. (The five previous identical
    elif branches are collapsed into one membership test.)
    """
    known_bases = {'Template_Warehouse', 'Company_Base', 'local_topic',
                   'out_search', 'account'}
    if base_name in known_bases:
        embedding_model_dict = cfg.readValue(base_name, 'embedding_model_dict')
        data_dir = f'data_{base_name}'
    else:
        embedding_model_dict = cfg.readValue('Basic', 'embedding_model_dict')
        # NOTE(review): section name is lower-case 'basic' here but 'Basic'
        # above — confirm which spelling the config file actually uses.
        data_dir = cfg.readValue('basic', 'datadir')

    return DocumentVectorData(
        collection_name=base_name,
        embedding_model_name=embedding_model_dict["BGEM3FlagModel"],
        data_dir=data_dir)



def test_document_add(documentVectorData):
    """测试给本地知识库增加文档

    Walks ./temp for .md files, maps each original name to its stored
    filename via temp/map.xlsx, and adds each file to the vector store.
    Failing paths are appended to err.txt and skipped.
    """
    df_map_path = os.path.join(os.getcwd(), "temp", "map.xlsx")
    df_map = pd.read_excel(df_map_path)
    directory = os.path.join(os.getcwd(), "temp")

    use_path = []
    for root, dirs, files in os.walk(directory):
        for filename in files:
            if filename.endswith(".md"):
                use_path.append(os.path.join(root, filename))

    i = 1  # counts successfully processed articles only (failures don't advance it)
    for path in use_path:
        print(f"正在处理第{i}篇文章：文章名字为：{Path(path).stem}")
        logging.info(f"正在处理第{i}篇文章：文章名字为：{Path(path).stem}")
        try:
            name = Path(path).stem
            res = df_map[df_map['file_name_origin'] == name]['filename'].values[0]
            file_name = res.split(".")[0]
            start_time = datetime.now()
            documentVectorData.add_file_v1(path, file_name)
            end_time = datetime.now()
            time_difference = end_time - start_time
            print(f"时间差（秒）: {time_difference.total_seconds()}")
        except Exception as e:
            print(f"出现的异常错误为：{e}")
            # `with` closes the file automatically; the explicit close() that
            # used to sit inside this block was redundant.
            with open("err.txt", "a") as f:
                f.write(path)
                f.write("\n")
            continue
        i = i + 1



def test_document_add_sql(documentVectorData):
    """Ingest pre-split article text straight from the database.

    Fetches every internal article name, then its chunks, and stores them
    via add_sql_file. (Removed: an unused `now_time` variable and a
    pointless f-prefix on a constant SQL string.)
    """
    sql = "select distinct article_name from documents_split_text where in_or_out_file = 0"
    data_name = to_database(sql, ())
    data_name_list = [i[0] for i in data_name]
    for name in data_name_list:
        sql = "select article_split_text  from documents_split_text where article_name = %s"
        data_text = to_database(sql, (name,))
        data_text_list = [i[0] for i in data_text]
        documentVectorData.add_sql_file(filepath="", file_name=name, txt_list=data_text_list)
    print("document_add_sql is done")



def test_document_update(documentVectorData):
    """Re-embed every ./temp .md file using the mapping in temp/map.xlsx.

    Bug fix: the parameter was named `ddocumentVectorData` while the body
    referenced `documentVectorData`, which only resolved by accident through
    the module-level global created in __main__. The parameter is now used.
    """
    df_map_path = os.path.join(os.getcwd(), "temp", "map.xlsx")
    df_map = pd.read_excel(df_map_path)
    directory = os.path.join(os.getcwd(), "temp")

    use_path = []
    for root, dirs, files in os.walk(directory):
        for filename in files:
            if filename.endswith(".md"):
                use_path.append(os.path.join(root, filename))

    i = 1
    for path in use_path:
        print(f"正在处理第{i}篇文章：文章名字为：{Path(path).stem}")
        logging.info(f"正在处理第{i}篇文章：文章名字为：{Path(path).stem}")
        name = Path(path).stem
        res = df_map[df_map['file_name_origin'] == name]['filename'].values[0]
        file_name = res.split(".")[0]
        start_time = datetime.now()
        documentVectorData.update_file(path, file_name)
        end_time = datetime.now()
        time_difference = end_time - start_time
        print(f"时间差（秒）: {time_difference.total_seconds()}")
        i = i + 1

def test_peak(documentVectorData):
    print(documentVectorData.peak(ids=["c10f166-8c17-4283-b307-1b89ddd6ad6f"]))

def test_document_add_structual(documentVectorData):
    """测试给本地知识库增加文档 (structured-document variant).

    Iterates a local export directory and ingests every article that is not
    already listed in the Company_Base file-list CSV.
    """
    print("进入test_document_add_structual")
    path_iter = Path(r"D:\file_data\20250208_neibuziliao47_out")

    # Collect already-ingested names from column 1 of the CSV list file.
    file_names = []
    with open(r"E:\llm\server\data_Company_Base\filelist_Company_Base.csv", "r") as f:
        for line in f:
            file_names.append(line.split(",")[1])

    idx = 0
    for path in path_iter.iterdir():
        idx = idx + 1
        name = path.stem
        if name in file_names:
            print(f"第{idx}篇{name}文章已入库了")
            continue
        # Bug fix: DocumentVectorData defines add_file_structural; the
        # previous call to add_file_structural_v4 raised AttributeError.
        documentVectorData.add_file_structural(path.stem)
        print(f"第{idx}篇{path.stem}入库完毕")

    print("done")


def test_document_search(documentVectorData):
    """测试从本地知识库检索文档
        def similarity_search_score_filter_where(self, question: str, k, filterx):"""
    question = "2019到2023顺丰航空货机数量"
    # question = "内务员需要注意的风险防控点有哪些？"
    # filterx =[{'$and': [{'主题': {'$in': ['快递物流']}}, {'公司名称': {'$in': ['顺丰']}}, {'年份范围': {'$lte': 2023}}]}, {'$or': [{'$contains': '全货机'},{'$contains': '全货机数量'}]}]
    # filterx =[{'$and': [{'领域': {'$in': ['快递物流']}}, {'公司名称的分词': {'$in': ['顺丰航空']}}, {'年份范围': {'$lte': 2024}}]},{'$or': [{'$contains': '顺丰航空'}, {'$contains': '全货机'}, {'$contains': '数量'}, {'$contains': '发展历程'}]}]
    # filterx =[{'$and': [{'主题': {'$in': ['快递物流']}}, {'公司名称的分词': {'$in': ['顺丰',"航空"]}}, {'年份范围': {'$lte': 2024}}]},{'$contains': '全货机'}]
    # filterx =[{'$and': [{'领域': {'$in': ['快递物流']}}, {'公司名称的分词': {'$in': ['顺丰', '航空']}},
    #                             {'时间年份': {'$lte': 2024}}]},
    #           {'$and':[
    #               {'$contains': '全货机'},
    #               {"$or":[
    #                   {'$contains': '全货机'},
    #                   {'$contains': '数量'},
    #                   {'$contains': '发展历程'}
    #               ]}
    #           ]}]
    # {'$or': [{'$contains': '全货机'}, {'$contains': '数量'}]}
    question = "2019到2023年顺丰航空拥有的全货机数量是多少？"
    filterx =[{'$and': [{'领域': {'$in': ['快递物流']}},
                        {'公司名称': {'$in': ['顺丰']}},
                        {'时间': {'$in': [2019, 2020, 2021, 2022, 2023]}}]},
              {'$or': [{'$contains': '全货机'}, {'$contains': '数量'}]}]
    filterx = [{'$and': [{'时间': {'$in': [2019, 2020, 2021, 2022, 2023]}}, {'领域': {'$in': ['快递物流']}}, {'公司名称': {'$in': ['顺丰']}}]}, {'$or': [{'$contains': '拥有'}, {'$contains': '货机'}, {'$contains': '数量'}, {'$contains': '多少'}]}]
    print(f"问题是：{question}")
    print(f"条目过滤：{filterx[0]}")
    print(f"内容过滤是：{filterx[1]}")
    vectordb_search = documentVectorData.similarity_search_score_filter_where(question, 100, filterx)
    print(f"查询出来的条数：{len(vectordb_search)}")
    for idx,item in enumerate(vectordb_search):
        print(f"第{idx}条数据：{item}")
    return vectordb_search

def test_doc_query(documentVectorData):
    res = documentVectorData.similarity_search_where(question="全面构建高水平市场化体系，以对标对表实现资源配置效率最优化、效益最大化",k=200)
    return res



def test_document_query(documentVectorData):
    # document_cnt = documentVectorData.vectordb.get(limit=3,where={"owner":{"$eq":34}})
    # document_cnt = documentVectorData.vectordb.get(limit=3,where={ "$and":[{"source":{"$eq":"mysql"}},{"article_id":{"$eq":'2'}}]})
    document_cnt = documentVectorData.vectordb.get(limit=3, where={
        "$and": [{"owner": {"$eq": 20}}, {"article_id": {"$eq": '10'}}]})
    # document_cnt = documentVectorData.vectordb.get(ids=['7bb6acb5-5789-11ee-b787-a4c3f049a37b'])
    print('document_cnt', document_cnt)


def test_document_delete(documentVectorData):
    # 通过where筛选后删除不成功，只能通过id删除
    # documentVectorData.vectordb.delete(where={"source":{"$eq":"mysql"}})
    documentVectorData.vectordb.delete(ids=['13089106-9ce3-41e5-9db9-3f6038792acd'])
    print('文件已删除')


def test_collection_delete(documentVectorData):
    # 删除数据集
    documentVectorData.vectordb.delete_collection()
    # documentVectorData.vectordb.delete(ids=['79667e75-512f-11ee-8124-a4c3f049a37b'])
    print('数据集已删除')

def test_delete(documentVectorData,file_name):
    documentVectorData.delete_file(file_name)

def test_delete_files(documentVectorData):
    filenames = documentVectorData.documentListData.search_part(line = "",cols='file_name')
    for filename in filenames:
        documentVectorData.delete_file(filename)
    print("done")

def add_inner_document():
    """Walk a fixed directory and add every file to the Company_Base store.

    (Removed: a function-local `import os`; os is already imported at module
    level.)
    """
    documentVectorData = vector_load("Company_Base")
    directory = r"G:\LLM\data\资讯\D32-docx"
    for root, dirs, files in os.walk(directory):
        for filename in files:
            filepath = os.path.join(root, filename)
            documentVectorData.add_file(filepath)

if __name__ == '__main__':

    # Manual driver: pick one knowledge base, then enable one or more of the
    # test routines below by (un)commenting the calls.

    # Enterprise-management knowledge bases (alternatives kept for reference)
    # documentVectorData = vector_load('Template_Warehouse')
    # documentVectorData = vector_load('local_topic')
    # documentVectorData = vector_load('Company_Base')
    # documentVectorData = vector_load('local_topic')
    documentVectorData = vector_load('out_search')


    # Template download library / ad-hoc checks
    test_peak(documentVectorData)
    # test_delete_files(documentVectorData)
    # test_document_add_sql(documentVectorData)
    # test_document_update(documentVectorData)
    # test_document_add(documentVectorData)
    # test_delete(documentVectorData,"20250408_095320_914")
    # test_document_add_structual(documentVectorData)
    # test_document_search(documentVectorData)
    # test_document_query(documentVectorData)
    # test_document_delete(documentVectorData)
    # test_collection_delete()
    test_doc_query(documentVectorData)
    # test_delete_files(documentVectorData)
    # test_peak(documentVectorData)

