import os
import re
from abc import ABC
from datetime import datetime
from typing import Any, Mapping, Optional
from typing import List

import uvicorn
from fastapi import FastAPI
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import TextLoader

from langchain.llms.base import LLM
from langchain.prompts.prompt import PromptTemplate
from langchain.text_splitter import CharacterTextSplitter
# from langchain_community.document_loaders import TextLoader
# from langchain_community.vectorstores import FAISS
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()

device = "cuda:0"  # the device to load the model onto
#EMBEDDING_DEVICE = "cuda:7"

# Load the local Qwen2-7B checkpoint once at import time; torch_dtype="auto"
# lets transformers pick the checkpoint's native dtype, and device_map pins
# the whole model on `device`.
# NOTE(review): loading here means importing this module requires the GPU and
# the local model path to be available — confirm that is intended.
# model = AutoModelForCausalLM.from_pretrained("/home/zhengzhenzhuang/models/qwen/Qwen2-7B",torch_dtype="auto",device_map="auto")
model = AutoModelForCausalLM.from_pretrained("/home/zhengzhenzhuang/models/Qwen2-7B", torch_dtype="auto",
                                             device_map=device)

tokenizer = AutoTokenizer.from_pretrained("/home/zhengzhenzhuang/models/Qwen2-7B")


class Qwen(LLM, ABC):
    """langchain LLM wrapper around the module-level Qwen2 model/tokenizer.

    Generation runs through the globals ``model``, ``tokenizer`` and
    ``device`` defined at module import time.
    """

    # Declared as annotated attributes so they are proper pydantic fields and
    # can be overridden at construction time, e.g. Qwen(temperature=0.5).
    max_token: int = 10000
    temperature: float = 0.01
    # Fixed: was `top_p = 0.9` without an annotation, which (unlike the
    # sibling attributes) made it a plain class variable rather than a field.
    top_p: float = 0.9
    history_len: int = 3

    # NOTE: the previous no-op `def __init__(self): super().__init__()`
    # swallowed all keyword arguments and made field overrides impossible;
    # pydantic's inherited __init__ is used instead.

    @property
    def _llm_type(self) -> str:
        """Identifier used by langchain for this LLM type."""
        return "Qwen"

    @property
    def _history_len(self) -> int:
        """Number of history rounds kept (not used by _call itself)."""
        return self.history_len

    def set_history_len(self, history_len: int = 10) -> None:
        """Update the stored history length."""
        self.history_len = history_len

    def _call(
            self,
            prompt: str,
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        """Generate a completion for *prompt* with the global Qwen2 model.

        NOTE(review): `stop`, `temperature`, `top_p` and `max_token` are
        currently NOT forwarded to `model.generate` — generation uses the
        model defaults with max_new_tokens=512. Confirm whether the sampling
        fields should be wired through.
        """
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ]
        # Render the chat template to plain text, appending the assistant
        # generation prompt.
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        model_inputs = tokenizer([text], return_tensors="pt").to(device)
        generated_ids = model.generate(
            model_inputs.input_ids,
            max_new_tokens=512
        )
        # Strip the echoed prompt tokens, keeping only the newly generated part.
        generated_ids = [
            output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]

        response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        return response

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"max_token": self.max_token,
                "temperature": self.temperature,
                "top_p": self.top_p,
                "history_len": self.history_len}


def find_txt_files_in_dir(directory):
    """Return the full paths of all .txt files directly inside *directory*.

    Subdirectories are not traversed; order follows os.listdir.
    """
    return [
        os.path.join(directory, entry)
        for entry in os.listdir(directory)
        if entry.endswith('.txt')
    ]


def load_file(filepath):
    """Load a text file and split it into character chunks.

    Uses chunk_size=800 with chunk_overlap=40; encoding is auto-detected.
    Returns the list of langchain Document chunks.
    """
    splitter = CharacterTextSplitter(chunk_size=800, chunk_overlap=40)
    loader = TextLoader(filepath, autodetect_encoding=True)
    return loader.load_and_split(splitter)


'''
如果已经创建好了，可以直接读取
db = FAISS.load_local("/home/zhengzhenzhuang/models/qwen/question/db", embeddings)
'''
# NOTE(review): the bare string above is leftover guidance ("if the FAISS index
# already exists it can be loaded directly"); it has no runtime effect.

# Prompt instructing the model to answer strictly from the supplied {context}
# and to say it does not know when the answer is absent. The text is Chinese
# and is part of runtime behavior — do not translate it.
prompt_template = """我将给你一个知识文本context,以及一个与你的工作有关的问题question.
             如果你在context中无法搜寻到问题的答案,即使你本身知道答案但我也请你不要回答,只需要告诉我你不知道答案就行.
             知识文本为:{context},
             问题为:{question}
             """
# Template consumed by the stuff-QA chain in q_a().
PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)


def q_a(question, fileName):
    """Answer *question* using the contents of a knowledge file.

    Loads /home/zhengzhenzhuang/test/question/<fileName>, truncates the first
    chunk to 6000 characters, and runs a langchain "stuff" QA chain over the
    Qwen model with PROMPT. The answer is cut at the first newline followed
    by an ASCII letter (drops trailing English continuation text).

    Returns the (possibly trimmed) answer string.
    """
    path = f"/home/zhengzhenzhuang/test/question/{fileName}"
    print(path)
    doc_content = load_file(path)
    # Fixed: previously doc_content[0] was accessed unconditionally, which
    # raised IndexError for empty/whitespace-only files. Guard the truncation;
    # an empty document list still flows through the chain (empty context).
    if doc_content and len(doc_content[0].page_content) > 6000:
        doc_content[0].page_content = doc_content[0].page_content[0:6000]

    # NOTE(review): a new Qwen() wrapper and chain are built on every request;
    # the underlying model is global so this is cheap, but could be hoisted.
    chain = load_qa_chain(Qwen(), chain_type="stuff", prompt=PROMPT)
    output = chain({"input_documents": doc_content, "question": question}, return_only_outputs=True)
    print(output)
    answer = output['output_text']
    match = re.search(r'\n[a-zA-Z]', answer)
    if match:
        # Keep only the text before the newline-plus-letter boundary.
        return answer[:match.start()]
    return answer


class MyClassModel(BaseModel):
    """Request body for the /qwen2/api endpoint.

    question: the user's question text.
    fileName: name of the knowledge .txt file under the fixed question dir.
    """
    question: str
    fileName: str


@app.get("/")
def read_root():
    """Trivial liveness endpoint for the service root."""
    greeting = {"Hello": "World"}
    return greeting


@app.get("/reload/file")
def reload_file():
    """Stub endpoint; always reports success without doing any reload."""
    result = {"answer": "success"}
    return result


@app.post("/qwen2/api")
async def create_myclass(myclass: MyClassModel):
    """QA endpoint: answer myclass.question from the file myclass.fileName.

    Returns the raw answer string produced by q_a().
    """
    t1 = datetime.now()
    question = myclass.question
    fileName = myclass.fileName
    print(question)
    print(fileName)
    answer = q_a(question, fileName)

    t2 = datetime.now()
    # Fixed: was (t1 - t2), which always logged a negative duration.
    second = (t2 - t1).total_seconds()
    print(f"耗时{second}")
    #return {"answer": f"{answer}"}
    return answer


if __name__ == "__main__":
    # Serve the API on all interfaces, port 8099 (blocking call).
    uvicorn.run(app, host="0.0.0.0", port=8099)
