import os
import re
from dataclasses import dataclass
from typing import Optional

import pptx
from langchain_text_splitters import RecursiveCharacterTextSplitter, Language, CharacterTextSplitter

from app.utils import get_milvus_client
from app.utils.embedding_api import get_embedding, get_embedding_2
from app.utils.minerU_handler import process_file
from app.utils.rag_word_util import parse_doc_by_one

# Shared Milvus client handle used by every MilvusDo method below.
Collection = get_milvus_client()
import threading

# Global lock serialising access to the milvus_index.txt id-counter file.
lock = threading.Lock()


@dataclass
class VectorEntity:
    """One row of a main knowledge-base collection in Milvus.

    ``__dict__`` of an instance is passed straight to ``Collection.insert``,
    so field names must match the collection schema exactly.
    """

    id: int          # primary key, issued by MilvusDo's file-backed counter
    vector: list     # embedding vector (fixed: was annotated with a bare `[]` literal)
    text: str        # chunk text stored alongside the embedding
    fileName: str    # source file name
    fileFrom: str    # origin/source tag supplied by the uploader
    fileCity: str    # city parsed from the file name, or "default"
    year: int        # date parsed from the file name; 0 when unknown
    month: int
    day: int

@dataclass
class VectorEntityPoc:
    """One row of the POC collection in Milvus.

    ``__dict__`` of an instance is passed straight to ``Collection.insert``,
    so field names must match the collection schema exactly.
    """

    id: int             # primary key, issued by MilvusDo's file-backed counter
    dense_vector: list  # embedding vector (fixed: was annotated with a bare `[]` literal)
    text: str           # chunk text stored alongside the embedding
    cate: str           # category tag (currently always '')
    fileName: str       # source file name
    year: int           # currently always 0 in upload_file_to_milvus_poc
    month: int
    tags: str           # free-form tags (currently always '')
    summary: str        # summary text (currently always '')

@dataclass
class Milvus_find:
    """Lightweight lookup record: a Milvus primary key paired with the
    name of the file the row came from."""

    id: int        # Milvus primary key
    fileName: str  # owning file name


@dataclass
class Milvus_reasult:
    """A single search hit: the context-expanded text plus its file name.

    (Name kept as-is — "reasult" — for compatibility with existing callers.)
    """

    text: str      # concatenated chunk text around the hit
    fileName: str  # file the hit belongs to

class MilvusDo:
    """Data-access helper around the module-level Milvus ``Collection`` client.

    Responsibilities:
      * collection lifecycle (create/drop),
      * a file-backed id counter (``milvus_index.txt``) guarded by the global
        ``lock``,
      * chunking, embedding and inserting documents of several formats,
      * similarity search that rebuilds surrounding context for each hit.
    """

    def delete_index(self, collection_name):
        """Drop ``collection_name`` if it exists.

        Returns "成功" on success, "失败" when the collection is absent.
        """
        if collection_name in Collection.list_collections():
            Collection.drop_collection(collection_name=collection_name)
            return "成功"
        print(f"Collection '{collection_name}' does not exist.")
        return "失败"

    def create_index(self, collection_name):
        """Create a 1024-dimension, L2-metric collection unless it exists.

        Returns "成功" when created, "失败" when it was already there.
        """
        if collection_name not in Collection.list_collections():
            Collection.create_collection(collection_name=collection_name,
                                         dimension=1024, metric_type="L2")
            return "成功"
        print(f"Collection '{collection_name}' already exists.")
        return "失败"

    def read_last_data(self):
        """Return the most recently issued id from ``milvus_index.txt``.

        Returns ``None`` when the file is missing or empty; raises
        ``ValueError`` when the last line cannot be parsed as an integer.
        """
        with lock:  # counter file is shared across threads
            try:
                with open("milvus_index.txt", 'r') as file:
                    lines = file.readlines()
            except FileNotFoundError:
                return None
            if not lines:
                return None
            try:
                return int(lines[-1].strip())
            except ValueError:
                raise ValueError("文件内容无法解析为整数")

    def search_by_searchName(
            self, searchName,
            collectionName: str,
            limit: Optional[int] = 10,
            filter: Optional[str] = None,
            dimiension: Optional[int] = 768,
    ):
        """Vector-search ``collectionName`` and return context-expanded hits.

        For every hit, the 20 neighbouring ids on each side are fetched and
        the text of neighbours belonging to the same file is concatenated, so
        each returned ``Milvus_reasult`` carries the hit plus its surrounding
        context. (``dimiension`` is kept for interface compatibility but is
        unused; the nlist is fixed at 1024.)
        """
        search_params = {
            "metric_type": "L2",
            "index_type": "IVF_FLAT",
            "params": {"nlist": 1024},
        }
        # "sfat" and "mmt" collections were built with the first embedding
        # model; everything else uses the second one.
        if collectionName in ("sfat", "mmt"):
            data_embedding = [get_embedding(searchName)]
        else:
            data_embedding = [get_embedding_2(searchName)]

        search_kwargs = {
            "collection_name": collectionName,
            "data": data_embedding,
            "limit": limit,
            "output_fields": ["text", "fileName", "fileFrom", "fileCity",
                              "year", "month", "day"],
            "search_params": search_params,
        }
        if filter is not None:  # only pass a filter when the caller gave one
            search_kwargs["filter"] = filter
        res = Collection.search(**search_kwargs)

        mil_r = []
        for result in res:
            print("result", result)
            for item in result:
                # Ids are issued sequentially during ingestion, so a +-20 id
                # window around the hit approximates its neighbouring chunks.
                ids = [item.id + offset for offset in range(-20, 21)]
                resBeforeAfter = Collection.get(
                    collection_name=collectionName,
                    ids=ids,
                    output_fields=["text", "fileName"],
                )
                fileNowName = item.fileName
                # Keep only neighbours from the same file — the id window may
                # straddle a boundary between two ingested files.
                textR = "".join(
                    neighbour.get('text')
                    for neighbour in resBeforeAfter
                    if neighbour.get('fileName') == fileNowName
                )
                mil_r.append(Milvus_reasult(text=textR, fileName=item.fileName))
        return mil_r

    def insert_one(self, vectorEntity: "VectorEntity", collectionName: str):
        """Insert one VectorEntity row; return the Milvus insert result."""
        insert_result = Collection.insert(
            collection_name=collectionName, data=vectorEntity.__dict__
        )
        return insert_result

    def insert_one_poc(self, vectorEntity: "VectorEntityPoc", collectionName: str):
        """Insert one VectorEntityPoc row; return the Milvus insert result."""
        insert_result = Collection.insert(
            collection_name=collectionName, data=vectorEntity.__dict__
        )
        return insert_result

    def delete_by_fileName(self, collection_name, file_name):
        """Delete every row of ``collection_name`` whose fileName matches.

        NOTE(review): the filter is built by string concatenation; a file
        name containing a quote breaks (or widens) the expression. Milvus
        offers no parameter binding here, so callers must pass trusted names.
        """
        res = Collection.delete(
            collection_name=collection_name,
            filter="fileName in ['" + file_name + "']"
        )

    def local_incr(self):
        """Append the next id to ``milvus_index.txt`` (thread-safe).

        Fixed: a missing file is now treated like an empty one instead of
        raising FileNotFoundError, so the counter is self-seeding.
        """
        file_path = "milvus_index.txt"
        with lock:
            try:
                with open(file_path, 'r') as file:
                    lines = file.readlines()
            except FileNotFoundError:
                lines = []
            if lines:
                new_data = int(lines[-1].strip()) + 1
            else:
                new_data = 1203232  # seed value for a fresh counter file
            with open(file_path, 'a') as file:
                file.write(f"{new_data}\n")

    def split_and_append_documents(self, documents, max_length=8000):
        """Return ``documents`` with any entry longer than ``max_length``
        sliced into consecutive ``max_length``-sized pieces, order kept."""
        result = []
        for document in documents:
            if len(document) > max_length:
                result.extend(document[i:i + max_length]
                              for i in range(0, len(document), max_length))
            else:
                result.append(document)
        return result

    # ------------------------------------------------------------------
    # Internal ingestion helpers
    # ------------------------------------------------------------------

    def _parse_file_name_meta(self, file_name, file_from):
        """Extract ``(fileFrom, fileCity, year, month, day)`` from a name.

        Expected shape: ``<anything>_<city>_<date>.<ext>`` where ``<date>``
        is either ``YYYY年M月D日``, dash-separated, or the literal
        "default". Any parsing problem falls back to city "default" and a
        zero date, mirroring the original broad-except behaviour.
        """
        name, _ = os.path.splitext(file_name)
        try:
            parts = re.split(r'_', name)
            fileFrom = file_from
            fileCity = parts[-2]
            date_part = parts[-1]
            # Original test was ('年' in date_part or date_part != "default"),
            # which reduces to the second clause alone.
            if date_part != "default":
                normalized = (date_part.replace('年', '-').replace('月', '-')
                              .replace('日', '').replace('.docx', ''))
                pieces = re.split(r'-', normalized)
                year = int(pieces[0])
                month = int(pieces[1]) if len(pieces) > 1 and pieces[1] else 0
                day = int(pieces[2]) if len(pieces) > 2 and pieces[2] else 0
            else:
                year = month = day = 0
        except Exception:
            # Names that do not follow the convention get neutral metadata.
            fileFrom = file_from
            fileCity = "default"
            year = month = day = 0
        return fileFrom, fileCity, year, month, day

    def _dump_chunks(self, documents, file_name):
        """Write the joined chunks to output_dir/<file_name>.txt for review."""
        combined_str = "\n".join(documents)
        if not os.path.exists("output_dir"):
            os.makedirs("output_dir")
        output_path = os.path.join("output_dir/", file_name + ".txt")
        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(combined_str)

    def _split_plain_text(self, text, chunk_size=150):
        """Split raw text into newline-separated chunks, dropping blanks."""
        from langchain.docstore.document import Document as document_langchain
        docs = [document_langchain(page_content=text)]
        text_splitter = CharacterTextSplitter(
            chunk_size=chunk_size, chunk_overlap=20, separator='\n')
        doc_texts = text_splitter.split_documents(docs)
        return [d.page_content for d in doc_texts if d.page_content.strip()]

    def _embed_and_insert(self, documents, memory_name, file_name,
                          file_from, file_city, year, month, day):
        """Issue an id, embed, and insert each chunk into ``memory_name``."""
        for document in documents:
            self.local_incr()
            save_vec = VectorEntity(id=self.read_last_data(),
                                    vector=get_embedding(document),
                                    text=document,
                                    fileName=file_name,
                                    fileFrom=file_from,
                                    fileCity=file_city,
                                    year=year,
                                    month=month,
                                    day=day)
            self.insert_one(save_vec, memory_name)

    def upload_file_to_milvus(self, memory_name, file_path, file_name, file_from):
        """Chunk, embed, and insert one file into collection ``memory_name``.

        Supported extensions: .docx, .pdf (routed through the minerU
        service), .md, .txt, .pptx; anything else is ignored. Returns
        "failed" when pdf preprocessing fails, otherwise None.
        """
        fileFrom, fileCity, year, month, day = self._parse_file_name_meta(
            file_name, file_from)
        _, ext = os.path.splitext(file_name)
        ext = ext.lower()

        if ext == '.docx':
            # Two chunkings (default and 190-char) are indexed side by side.
            documents = parse_doc_by_one(file_path)
            documents = documents + parse_doc_by_one(file_path, chunk_size=190)
            for documenta in documents:
                print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!切分 ")
                print(documenta)
            self._dump_chunks(documents, file_name)
            documents = self.split_and_append_documents(documents)
            self._embed_and_insert(documents, memory_name, file_name,
                                   fileFrom, fileCity, year, month, day)
        elif ext == '.pdf':
            # Pdfs are converted by the minerU service before parsing.
            file_path = "http://36.140.251.14:17860/fileHandler/download/" + file_path
            file_path = process_file(file_path)
            if not file_path:
                print("文件处理失败，无法继续上传到Milvus")
                return "failed"
            documents = parse_doc_by_one(file_path)
            documents = documents + parse_doc_by_one(file_path, chunk_size=190)
            for documenta in documents:
                print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!切分pdf ")
                print(documenta)
            self._dump_chunks(documents, file_name)
            documents = self.split_and_append_documents(documents)
            self._embed_and_insert(documents, memory_name, file_name,
                                   fileFrom, fileCity, year, month, day)
        elif ext == '.md':
            with open(file_path, 'r', encoding='utf-8') as f:
                markdown_text = f.read()
            # Markdown-aware splitter keeps heading structure intact.
            markdown_splitter = RecursiveCharacterTextSplitter.from_language(
                language=Language.MARKDOWN, chunk_size=150, chunk_overlap=0
            )
            chunks = markdown_splitter.split_text(markdown_text)
            for i, chunk in enumerate(chunks):
                print(f"--- Chunk {i + 1} ---")
                print(chunk)
                print(f"(长度: {len(chunk)})\n")
            self._dump_chunks(chunks, file_name)
            # NOTE: markdown chunks are inserted as-is, without the 8000-char
            # oversize split used by the other branches (original behaviour).
            self._embed_and_insert(chunks, memory_name, file_name,
                                   fileFrom, fileCity, year, month, day)
            return
        elif ext == '.txt':
            with open(file_path, 'r', encoding='utf-8') as f:
                text = f.read()
            documents = self._split_plain_text(text)
            for documenta in documents:
                print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!切分 ")
                print(documenta)
            self._dump_chunks(documents, file_name)
            documents = self.split_and_append_documents(documents)
            self._embed_and_insert(documents, memory_name, file_name,
                                   fileFrom, fileCity, year, month, day)
            return
        elif ext == '.pptx':
            # Collect the text of every text frame on every slide.
            prs = pptx.Presentation(file_path)
            fragments = []
            for slide in prs.slides:
                for shape in slide.shapes:
                    if shape.has_text_frame:
                        fragments.append(shape.text_frame.text + "\n")
            documents = self._split_plain_text("".join(fragments))
            for documenta in documents:
                print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!切分 ")
                print(documenta)
            self._dump_chunks(documents, file_name)
            documents = self.split_and_append_documents(documents)
            self._embed_and_insert(documents, memory_name, file_name,
                                   fileFrom, fileCity, year, month, day)
            return
        elif ext in ('.csv', '.xlsx'):
            # Spreadsheets are intentionally skipped. (Fixed: the original
            # compared against 'xlsx' without the dot, so .xlsx files only
            # fell through via the generic else-branch.)
            return
        else:
            return

    def upload_file_to_milvus_poc(self, memory_name, file_path, file_name):
        """Chunk, embed, and insert one .txt file into the POC collection.

        Non-.txt files are ignored. Fixed: the original constructed
        ``VectorEntityPoc`` without the required ``id`` field, which raised
        TypeError on every insert; the id now comes from the file-backed
        counter, consistent with ``upload_file_to_milvus``.
        """
        name, ext = os.path.splitext(file_name)

        # Metadata placeholders: the POC schema requires these fields but no
        # extraction is performed yet.
        cate = ''
        year = 0
        month = 0
        tags = ''
        summary = ''

        if ext.lower() != '.txt':
            return

        with open(file_path, 'r', encoding='utf-8') as f:
            text = f.read()
        documents = self._split_plain_text(text, chunk_size=250)
        for documenta in documents:
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!切分 ")
            print(documenta)
        self._dump_chunks(documents, file_name)
        documents = self.split_and_append_documents(documents)
        for document in documents:
            self.local_incr()
            save_vec = VectorEntityPoc(
                id=self.read_last_data(),
                dense_vector=get_embedding(document),
                text=document,
                fileName=file_name,
                cate=cate,
                year=year,
                month=month,
                tags=tags,
                summary=summary
            )
            self.insert_one_poc(save_vec, memory_name)
        return
