import os
import re
from abc import ABC
from datetime import datetime
from typing import Any, Mapping, Optional
from typing import List, Tuple
import torch


import numpy as np
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chains import RetrievalQA
from langchain.docstore.document import Document
from langchain.document_loaders import TextLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.llms.base import LLM
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import FAISS
from transformers import AutoModelForCausalLM, AutoTokenizer
from pydantic import BaseModel
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import TokenTextSplitter

device = "cuda:1"  # the device to move tokenized inputs onto before generation
EMBEDDING_DEVICE = "cuda:2"  # separate GPU for the sentence-embedding model


# NOTE(review): device_map="auto" lets transformers shard the model across
# all visible GPUs; the `device` constant above is only used for inputs.
model = AutoModelForCausalLM.from_pretrained("/home/zhengzhenzhuang/models/qwen/Qwen2-7B", torch_dtype="auto",device_map="auto")

tokenizer = AutoTokenizer.from_pretrained("/home/zhengzhenzhuang/models/qwen/Qwen2-7B")

class Qwen(LLM, ABC):
    """LangChain ``LLM`` wrapper around the globally loaded Qwen2 model.

    Generation goes through the module-level ``model``/``tokenizer``.
    NOTE(review): ``max_token``/``temperature``/``top_p`` are declared but
    not currently forwarded to ``model.generate`` — confirm intent.
    """

    max_token: int = 10000
    temperature: float = 0.01
    top_p = 0.9
    history_len: int = 3

    def __init__(self):
        super().__init__()

    @property
    def _llm_type(self) -> str:
        """Identifier used by langchain for this LLM type."""
        return "Qwen"

    @property
    def _history_len(self) -> int:
        """Number of history turns kept (read-only view)."""
        return self.history_len

    def set_history_len(self, history_len: int = 10) -> None:
        """Update the number of history turns kept."""
        self.history_len = history_len

    def _call(
            self,
            prompt: str,
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        """Run one chat turn and return only the newly generated text."""
        chat = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ]
        rendered = tokenizer.apply_chat_template(
            chat, tokenize=False, add_generation_prompt=True
        )
        inputs = tokenizer([rendered], return_tensors="pt").to(device)
        output_ids = model.generate(inputs.input_ids, max_new_tokens=512)
        # Strip the prompt tokens so only the completion is decoded
        # (batch size is 1, so every row has the same prompt length).
        prompt_len = inputs.input_ids.shape[1]
        completion_ids = output_ids[:, prompt_len:]
        return tokenizer.batch_decode(completion_ids, skip_special_tokens=True)[0]

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            "max_token": self.max_token,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "history_len": self.history_len,
        }


"""

class ChineseTextSplitter(CharacterTextSplitter):
    def __init__(self, pdf: bool = False, **kwargs):
        super().__init__(**kwargs)
        self.pdf = pdf

    def split_text(self, text: str) -> List[str]:
        if self.pdf:
            text = re.sub(r"\n{3,}", "\n", text)
            text = re.sub('\s', ' ', text)
            text = text.replace("\n\n", "")
        sent_sep_pattern = re.compile(
            '([﹒﹔﹖﹗．。！？]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')
        sent_list = []
        for ele in sent_sep_pattern.split(text):
            if sent_sep_pattern.match(ele) and sent_list:
                sent_list[-1] += ele
            elif ele:
                sent_list.append(ele)
        return sent_list
 """


class ChineseTextSplitter(CharacterTextSplitter):
    """Sentence-aware splitter for Chinese text.

    Splits on Chinese/English sentence terminators first, then falls back to
    progressively weaker separators (commas, whitespace runs, single spaces)
    for any fragment still longer than ``sentence_size`` characters.
    """

    def __init__(self, pdf: bool = False, sentence_size: int = 100, **kwargs):
        super().__init__(**kwargs)
        self.pdf = pdf                      # apply PDF-specific whitespace cleanup first
        self.sentence_size = sentence_size  # soft max length (chars) per returned chunk

    def split_text(self, text: str) -> List[str]:
        """Split *text* into sentences/fragments of at most ~sentence_size chars.

        Bug fixes vs. the original: regex escapes are now raw strings, and the
        ``list.index``-based in-place splicing (which picked the wrong slot when
        two fragments were identical, and could raise ValueError after the list
        was rebound) is replaced by straightforward list building with the same
        filtering semantics.
        """
        if self.pdf:
            # PDF extraction tends to produce spurious blank lines/whitespace.
            text = re.sub(r"\n{3,}", r"\n", text)
            text = re.sub(r'\s', " ", text)
            text = re.sub("\n\n", "", text)

        # Insert \n after sentence terminators, keeping closing quotes
        # attached to the sentence they end.
        text = re.sub(r'([;；.!?。！？\?])([^”’])', r"\1\n\2", text)  # single-char terminators
        text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)  # English ellipsis
        text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)  # Chinese ellipsis
        text = re.sub(r'([;；!?。！？\?]["’”」』]{0,2})([^;；!?，。！？\?])', r'\1\n\2', text)
        # When a terminator precedes a closing quote, the quote ends the
        # sentence, so the break goes after the quote (handled above).
        text = text.rstrip()  # drop trailing newlines at the end of the block

        sentences = [s for s in text.split("\n") if s]
        result: List[str] = []
        for sent in sentences:
            if len(sent) <= self.sentence_size:
                result.append(sent)
                continue
            # Level 1 fallback: break on commas/periods.
            level1 = re.sub(r'([,，.]["’”」』]{0,2})([^,，.])', r'\1\n\2', sent).split("\n")
            merged1: List[str] = []
            for frag1 in level1:
                if len(frag1) <= self.sentence_size:
                    merged1.append(frag1)
                    continue
                # Level 2 fallback: break on newline runs / multi-space runs.
                level2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', frag1).split("\n")
                merged2: List[str] = []
                for frag2 in level2:
                    if len(frag2) > self.sentence_size:
                        # Level 3 (last resort): break on single spaces.
                        level3 = re.sub(r'( ["’”」』]{0,2})([^ ])', r'\1\n\2', frag2).split("\n")
                        merged2.extend(p for p in level3 if p)
                    else:
                        merged2.append(frag2)
                merged1.extend(p for p in merged2 if p)
            result.extend(p for p in merged1 if p)
        return result


def find_txt_files_in_dir(directory):
    """Return the full paths of the ``.txt`` files directly inside *directory*.

    Subdirectories are not descended into; order follows ``os.listdir``.
    """
    return [
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if name.endswith('.txt')
    ]


def load_file(filepath):
    """Load one text file and split it into chunks ready for embedding.

    Also dumps the resulting chunks next to the source file for inspection.
    """
    loader = TextLoader(filepath, autodetect_encoding=True)
    # RecursiveCharacterTextSplitter was chosen over the alternatives tried
    # earlier (ChineseTextSplitter / CharacterTextSplitter / TokenTextSplitter).
    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=60)
    docs = loader.load_and_split(splitter)
    write_check_file(filepath, docs)  # debug dump of the produced chunks
    return docs


def write_check_file(filepath, docs):
    """Append a human-readable dump of *docs* for debugging.

    Writes to ``<dirname(filepath)>/tmp_files/load_file.txt``: one header line
    with the source path and chunk count, then one line per chunk.

    Fixes vs. the original: the redundant ``fout.close()`` inside the ``with``
    block is removed (the context manager already closes the file), and the
    check-then-create directory race is avoided with ``exist_ok=True``.
    """
    folder_path = os.path.join(os.path.dirname(filepath), "tmp_files")
    os.makedirs(folder_path, exist_ok=True)
    fp = os.path.join(folder_path, 'load_file.txt')
    with open(fp, 'a+', encoding='utf-8') as fout:
        fout.write("filepath=%s,len=%s" % (filepath, len(docs)))
        fout.write('\n')
        for doc in docs:
            fout.write(str(doc))
            fout.write('\n')


def separate_list(ls: List[int]) -> List[List[int]]:
    """Group a sorted list of ints into runs of consecutive values.

    e.g. ``[1, 2, 3, 5, 6, 8] -> [[1, 2, 3], [5, 6], [8]]``.

    Fix vs. the original: an empty input no longer raises IndexError
    (``ls[0]``); it returns ``[]``.
    """
    if not ls:
        return []
    lists: List[List[int]] = []
    run = [ls[0]]
    for cur in ls[1:]:
        # Extend the current run while values stay consecutive.
        if run[-1] + 1 == cur:
            run.append(cur)
        else:
            lists.append(run)
            run = [cur]
    lists.append(run)
    return lists


class FAISSWrapper(FAISS):
    """FAISS vector store whose hits are expanded with neighbouring chunks
    from the same source document before being returned."""

    # Max total characters of one merged hit (raised from 250).
    chunk_size = 500
    # If True, merge adjacent chunks into each hit. NOTE(review): "conent"
    # is a typo for "content"; kept because external code may reference it.
    chunk_conent = True
    # Hits whose distance exceeds this are dropped; 0 disables the filter.
    score_threshold = 0

    def similarity_search_with_score_by_vector(
            self, embedding: List[float], k: int = 4
    ) -> List[Tuple[Document, float]]:
        # Returns (Document, score) pairs for `embedding`, merging each hit
        # with its neighbouring chunks (same source) up to chunk_size chars.
        # NOTE(review): the inner `for k in range(...)` loop below shadows the
        # `k` parameter; harmless since `k` is not read afterwards, but it
        # deserves a rename.
        scores, indices = self.index.search(np.array([embedding], dtype=np.float32), k)
        docs = []
        id_set = set()
        store_len = len(self.index_to_docstore_id)
        for j, i in enumerate(indices[0]):
            if i == -1 or 0 < self.score_threshold < scores[0][j]:
                # -1 happens when not enough docs are returned; the second
                # clause drops hits beyond the score threshold.
                continue
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not self.chunk_conent:
                # No merging requested: return the raw chunk with its score.
                if not isinstance(doc, Document):
                    raise ValueError(f"Could not find document for id {_id}, got {doc}")
                doc.metadata["score"] = int(scores[0][j])
                docs.append(doc)
                continue
            id_set.add(i)
            docs_len = len(doc.page_content)
            # Walk outwards (i±1, i±2, ...) collecting neighbours from the
            # same source file until adding one would exceed chunk_size.
            for k in range(1, max(i, store_len - i)):
                break_flag = False
                for l in [i + k, i - k]:
                    if 0 <= l < len(self.index_to_docstore_id):
                        _id0 = self.index_to_docstore_id[l]
                        doc0 = self.docstore.search(_id0)
                        if docs_len + len(doc0.page_content) > self.chunk_size:
                            break_flag = True
                            break
                        elif doc0.metadata["source"] == doc.metadata["source"]:
                            docs_len += len(doc0.page_content)
                            id_set.add(l)
                if break_flag:
                    break
        if not self.chunk_conent:
            return docs
        if len(id_set) == 0 and self.score_threshold > 0:
            # Every hit was filtered out by the threshold.
            return []
        # Group the collected indices into consecutive runs; each run is
        # concatenated into one Document.
        id_list = sorted(list(id_set))
        id_lists = separate_list(id_list)
        for id_seq in id_lists:
            for id in id_seq:
                if id == id_seq[0]:
                    _id = self.index_to_docstore_id[id]
                    doc = self.docstore.search(_id)
                else:
                    _id0 = self.index_to_docstore_id[id]
                    doc0 = self.docstore.search(_id0)
                    doc.page_content += " " + doc0.page_content
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            # Score of a merged doc = best (lowest) distance among the run's
            # members that appeared in the raw search results.
            doc_score = min([scores[0][id] for id in [indices[0].tolist().index(i) for i in id_seq if i in indices[0]]])
            doc.metadata["score"] = int(doc_score)
            docs.append((doc, doc_score))
        return docs


def reload_file():
    """(Re)build the FAISS knowledge base from every .txt in the document dir.

    Returns the populated FAISSWrapper store.

    Fix vs. the original: elapsed time was computed as ``start - now`` and so
    always printed a negative duration; the operands are now in the right order.
    """
    print("开始加载知识库")

    load_time = datetime.now()
    docs = []
    for file in find_txt_files_in_dir("/home/zhengzhenzhuang/models/qwen/document/"):
        print(file)
        docs.extend(load_file(file))

    EMBEDDING_MODEL = 'bge-large-zh-v1.5'
    embedding_model_dict = {
        "bge-large-zh-v1.5": "/home/zhengzhenzhuang/models/qwen/bge-large-zh-v1.5",
    }
    embeddings = HuggingFaceEmbeddings(model_name=embedding_model_dict[EMBEDDING_MODEL],
                                       model_kwargs={'device': EMBEDDING_DEVICE})
    docsearch = FAISSWrapper.from_documents(docs, embeddings)
    second = (datetime.now() - load_time).total_seconds()
    print(f"加载知识库耗时{second}")
    return docsearch


docsearch = reload_file()

def q_a(question):
    """Answer *question* via retrieval-augmented generation over ``docsearch``.

    Builds a fresh RetrievalQA chain per call, runs it, then truncates any
    trailing English continuation text from the answer.
    """
    PROMPT_TEMPLATE = """基于以下已知内容，简洁和专业的来回答用户的问题。如果无法从中得到答案或者问题不完善，请说"抱歉，在文档中没有找到与提问相关的内容，请尝试换个问题问问吧。"。答案请使用中文。已知内容:{context_str}。问题:{question}"""

    # Number of chunks fetched from the vector store per query.
    VECTOR_SEARCH_TOP_K = 10
    # Chain style; alternatives: stuff / map_reduce / refine / map_rerank.
    CHAIN_TYPE = 'stuff'

    prompt = PromptTemplate(
        template=PROMPT_TEMPLATE, input_variables=["context_str", "question"]
    )
    qa = RetrievalQA.from_chain_type(
        llm=Qwen(),
        chain_type=CHAIN_TYPE,
        retriever=docsearch.as_retriever(search_kwargs={"k": VECTOR_SEARCH_TOP_K}),
        chain_type_kwargs={"prompt": prompt, "document_variable_name": "context_str"},
    )

    answer = qa.run(question)
    torch.cuda.empty_cache()  # release generation buffers between requests

    # Truncate at the first newline followed by an ASCII letter: the model
    # sometimes appends English continuation text after the Chinese answer.
    cutoff = re.search(r'\n[a-zA-Z]', answer)
    return answer[:cutoff.start()] if cutoff else answer


# FastAPI application serving the question-answering endpoints below.
from fastapi import FastAPI

app = FastAPI()
import uvicorn


class MyClassModel(BaseModel):
    # Request body for POST /qwen2/api: the user's question text.
    question: str


@app.get("/")
def read_root():
    """Liveness probe for the service root."""
    return dict(Hello="World")


@app.get("/reload/file")
def reload_knowledge_base():
    """Rebuild the FAISS knowledge base from the document directory.

    Fix vs. the original: the handler was named ``reload_file``, which
    shadowed the module-level loader of the same name and did nothing.
    It now actually reloads the store used by ``q_a``.
    """
    global docsearch
    docsearch = reload_file()
    return {"answer": "success"}


@app.post("/qwen2/api")
async def create_myclass(myclass: MyClassModel):
    """Answer a question posted as JSON ``{"question": ...}``.

    Fix vs. the original: elapsed time was computed as ``t1 - t2`` (start
    minus end) and so always printed a negative duration.
    """
    t1 = datetime.now()
    question = myclass.question
    print(question)
    answer = q_a(question)

    second = (datetime.now() - t1).total_seconds()
    print(f"耗时{second}")
    return {"answer": f"{answer}"}


if __name__ == "__main__":
    # Serve the FastAPI app on all interfaces, port 8091.
    uvicorn.run(app, host="0.0.0.0", port=8091)
