import pandas as pd
import asyncio
import os
import sys
import requests
import asyncio 
import uuid

sys.path.append("/home/xiaoji-ai/workspace/projects/xiaoji_QA/langchainQA_server")

from typing import List, Union, Callable
from langchain.docstore.document import Document
from langchain.document_loaders import UnstructuredFileLoader, TextLoader
from langchain.document_loaders import UnstructuredWordDocumentLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores.milvus import Milvus

from app.textsplitter.chinese_text_splitter_new import ChineseTextSplitter
from app.textsplitter.splitter1 import ChineseTextSplitter1
from app.textsplitter.chinese_recursive_text_splitter import (
    ChineseRecursiveTextSplitter,
)
from langchain_elasticsearch import ElasticsearchStore,ElasticsearchRetriever
from app.loader import UnstructuredPaddlePDFLoader
from app.textsplitter import zh_title_enhance
from app.utils.general_utils import *
from app.config.milvus import *
from app.schema.file import *
from app.loader.csv_loader import CSVLoader
from app.config.model_config import SENTENCE_SIZE, ZH_TITLE_ENHANCE, UPLOAD_ROOT_PATH
from app.config.log_config import loggerqa

from langchain_elasticsearch import ElasticsearchRetriever
# Second-pass splitter: token-length-aware recursive splitter used to re-chunk
# over-long documents. Separators are ordered strongest-break-first (newline,
# sentence enders, clause enders, then commas/spaces).
_SPLIT_SEPARATORS = [
    "\n",
    ".", "。",
    "!", "！",
    "?", "？",
    "；", ";",
    "……", "…",
    "、", "，", ",",
    " ",
]

text_splitter = RecursiveCharacterTextSplitter(
    separators=_SPLIT_SEPARATORS,
    chunk_size=400,
    length_function=num_tokens,
)


class LocalFile:
    """Download a remote file, split it into Documents, insert them into a
    Milvus vector store, then delete the local copies.

    The on-disk file name is prefixed with a short random UUID so concurrent
    uploads of identically named files do not collide.
    """

    def __init__(self, file: MilvusFileSchema, milvus: Milvus):
        self.file_id = file.file_id
        self.docs: List[Document] = []
        # Random prefix avoids collisions between files with the same name.
        self.file_name = self.generate_uuid(10) + os.path.split(file.url)[-1]
        self.url = file.url
        self.vector_store = milvus
        # self.es_store = es_store
        self.source = file.file_name

        if isinstance(self.file_name, str):
            curr_pwd = os.getcwd()
            self.file_path = os.path.join(curr_pwd, "data_files", self.file_name)

    async def split_file_to_docs(
        self,
        is_insert: bool = True,
        ocr_engine: Callable = get_pdf_text,
        sentence_size=SENTENCE_SIZE,
        using_zh_title_enhance=ZH_TITLE_ENHANCE,
    ):
        """Split the downloaded file into Documents and optionally insert them.

        Args:
            is_insert: when True, upload the resulting docs to the vector
                store; when False, skip the upload and return the full text.
            ocr_engine: OCR callable handed to the PDF loader.
            sentence_size: max sentence length for the Chinese splitters.
            using_zh_title_enhance: whether to run Chinese title enhancement.

        Returns:
            The concatenated plain text when ``is_insert`` is False,
            otherwise None.

        Raises:
            TypeError: for unsupported file extensions.
        """
        lower_path = self.file_path.lower()
        if lower_path.endswith(".txt"):
            loader = TextLoader(self.file_path, autodetect_encoding=True)
            texts_splitter = ChineseTextSplitter1(
                pdf=False, sentence_size=sentence_size
            )
            docs = loader.load_and_split(texts_splitter)
        elif lower_path.endswith(".pdf"):
            loader = UnstructuredPaddlePDFLoader(self.file_path, ocr_engine)
            texts_splitter = ChineseTextSplitter1(pdf=True, sentence_size=sentence_size)
            docs = loader.load_and_split(texts_splitter)
        elif lower_path.endswith((".doc", ".docx")):
            loader = UnstructuredWordDocumentLoader(self.file_path, mode="elements")
            texts_splitter = ChineseTextSplitter1(
                pdf=False, sentence_size=sentence_size
            )
            docs = loader.load_and_split(texts_splitter)
        elif lower_path.endswith(".xlsx"):
            # Convert the sheet to CSV first so CSVLoader can handle it;
            # the temporary CSV is removed later in delete_file_async().
            csv_file_path = self.file_path[:-5] + ".csv"
            xlsx = pd.read_excel(self.file_path, engine="openpyxl")
            xlsx.to_csv(csv_file_path, index=False)
            loader = CSVLoader(
                csv_file_path, csv_args={"delimiter": ",", "quotechar": '"'}
            )
            docs = loader.load()
        else:
            raise TypeError("文件类型不支持，目前仅支持：[pdf,docx,xlsx,doc]")

        if using_zh_title_enhance:
            loggerqa.info("using_zh_title_enhance %s", using_zh_title_enhance)
            docs = zh_title_enhance(docs)

        # Re-chunk: join everything into one string and re-split by total
        # length so chunk sizes no longer depend on the loader's segmentation.
        ans = "".join(doc.page_content + "\n" for doc in docs)

        if len(ans) < 5000:
            docs = [Document(page_content=ans, metadata={})]
        else:
            # BUGFIX: the original used `elif len(ans) > 5000`, which silently
            # kept the loader's docs when len(ans) == 5000 exactly.
            docs = await self.split_text(ans)

        # Inject source metadata into every chunk (replaces loader metadata).
        for doc in docs:
            doc.metadata = {"file_id": self.file_id, "file_name": self.source}

        if docs:
            loggerqa.info(
                "langchain analysis content head: %s", docs[0].page_content[:100]
            )
        else:
            loggerqa.info("langchain analysis docs is empty!")

        self.docs = docs
        if is_insert:
            await self.insert_to_store()  # upload chunks to the vector store
        await self.delete_file_async()  # remove local temp files

        if not is_insert:
            return ans

    async def insert_to_store(self):
        """Insert the prepared Documents into the Milvus vector store."""
        # await self.es_store.aadd_documents(self.docs)
        loggerqa.info("文件上传成功！")
        await self.vector_store.aadd_documents(self.docs)

    async def delete_file_async(self):
        """Delete the downloaded file plus any derived temp files.

        os.remove runs in the default executor so the event loop is not
        blocked by filesystem latency.
        """
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(None, os.remove, self.file_path)
        lower_path = self.file_path.lower()
        if lower_path.endswith(".pdf"):
            # The PDF loader writes an intermediate .txt under temp_files/.
            aux_path = os.path.join(
                os.path.dirname(self.file_path), "temp_files", self.file_name + ".txt"
            )
            await loop.run_in_executor(None, os.remove, aux_path)
        elif lower_path.endswith(".xlsx"):
            # Remove the CSV produced during xlsx conversion.
            stem, _ = os.path.splitext(self.file_name)
            aux_path = os.path.join(os.path.dirname(self.file_path), stem + ".csv")
            await loop.run_in_executor(None, os.remove, aux_path)
        loggerqa.info("文件删除成功！")

    async def download_file(self):
        """Download ``self.url`` into the data_files directory.

        BUGFIX: the original called the synchronous ``requests.get`` directly
        inside this coroutine, stalling the event loop for the whole download
        and with no timeout; the blocking work now runs in the default
        executor with a timeout.
        """
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(None, self._download_file_sync)

    def _download_file_sync(self):
        # Blocking helper: streamed GET written chunk-by-chunk to disk.
        target = os.path.join(os.getcwd(), "data_files", self.file_name)
        response = requests.get(self.url, stream=True, timeout=60)
        if response.status_code == 200:
            with open(target, "wb") as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
            loggerqa.info("文件下载成功！")
        else:
            loggerqa.info("文件下载失败，状态码：%s", response.status_code)

    def generate_uuid(self, length: int) -> str:
        """Return the first *length* characters of a hyphen-stripped UUID4."""
        return str(uuid.uuid4()).replace("-", "")[:length]

    async def split_text(self, text: str) -> List[Document]:
        """Split *text* on newlines into Documents of just over 2950 chars.

        BUGFIX: the original discarded any trailing remainder shorter than
        the threshold (the last chunk of every file was lost); it is now
        flushed as a final Document.
        """
        docs: List[Document] = []
        buffer = ""
        for line in text.split("\n"):
            buffer += line + "\n"
            if len(buffer) > 2950:
                docs.append(Document(page_content=buffer, metadata={}))
                buffer = ""
        if buffer.strip():
            # Flush the remainder the original implementation dropped.
            docs.append(Document(page_content=buffer, metadata={}))
        return docs

if __name__ == "__main__":
    # Manual smoke test: download one PDF from OSS, split it, and insert the
    # chunks into a local Milvus collection. Requires network access and the
    # project's Milvus/ES configuration.
    import asyncio
    from app.schema.file import *
    from app.config.es import EsConfig
    from app.retriever.es import MyElasticsearch
    from app.retriever.milvus import MilvusVectorStore
    
    file_milvus = MilvusFileSchema(
        file_id="123456789",
        file_name="test.pdf",
        url="https://langchainqa-hangzhou.oss-cn-hangzhou.aliyuncs.com/upfile/414efaa2-250a-490f-a3ed-da1eaa1ef7841_阿里的安卓开发规范.pdf")
    milvus = MilvusVectorStore("a2b89629e0a8940398fb0f6c5365f9611")
    # NOTE(review): es_store is created but never used below (ES insertion is
    # commented out in insert_to_store). index_name must not start with a dash.
    es_store = MyElasticsearch("a2b89629e0a8940398fb0f6c5365f9611")
    local_file = LocalFile(file_milvus, milvus=milvus)
    asyncio.run(local_file.download_file())  # download the file locally
    asyncio.run(local_file.split_file_to_docs())  # split, upload, and delete the local file


# /home/xiaoji-ai/workspace/projects/xiaoji_QA/langchainQA_server/data_files/c5ece1baf88fc1ee8d-7502-4122-82e0-3374004c9a71副本政策3.csv

# /home/xiaoji-ai/workspace/projects/xiaoji_QA/langchainQA_server/data_files/c5ece1baf88fc1ee8d-7502-4122-82e0-3374004c9a71副本政策3.25.csv