import os
import re
from abc import ABC
from datetime import datetime
from typing import Any, Mapping, Optional
from typing import List, Tuple
import torch
import torch.nn as nn
from langchain.chains.question_answering import load_qa_chain
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
# from langchain_community.document_loaders import TextLoader
# from langchain_community.vectorstores import FAISS
from langchain.vectorstores import FAISS
from langchain.document_loaders import TextLoader
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from pydantic import BaseModel
from langchain.llms.base import LLM
from langchain.prompts.prompt import PromptTemplate
from transformers import AutoModelForCausalLM, AutoTokenizer
from langchain.callbacks.manager import CallbackManagerForLLMRun

from fastapi import FastAPI
import uvicorn

app = FastAPI()

# Device onto which tokenized inputs are moved before generation; the model
# weights themselves are placed by device_map="auto".
device = "cuda:1"
# Separate device for the sentence-embedding model.
EMBEDDING_DEVICE = "cuda:7"

# Single source of truth for the model location (the path was previously
# duplicated across the model and tokenizer calls).
MODEL_PATH = "/home/zhengzhenzhuang/models/qwen/Qwen2-7B"

model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, torch_dtype="auto",
                                             device_map="auto")

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)


class Qwen(LLM, ABC):
    """LangChain LLM wrapper around the module-level Qwen2 causal LM.

    ``_call`` formats the prompt with the tokenizer's chat template (fixed
    "helpful assistant" system message) and decodes only the newly
    generated tokens.
    """

    max_token: int = 10000     # reported via _identifying_params; not passed to generate()
    temperature: float = 0.01  # reported via _identifying_params; not passed to generate()
    # FIX: annotated so pydantic registers it as a real field like the
    # attributes above (a bare `top_p = 0.9` was a plain class attribute).
    top_p: float = 0.9
    history_len: int = 3       # number of history turns to keep; not used by _call itself

    @property
    def _llm_type(self) -> str:
        return "Qwen"

    @property
    def _history_len(self) -> int:
        return self.history_len

    def set_history_len(self, history_len: int = 10) -> None:
        self.history_len = history_len

    def _call(
            self,
            prompt: str,
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        """Generate a completion for *prompt*.

        ``stop`` and ``run_manager`` are accepted for interface
        compatibility but ignored by this implementation.
        """
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ]
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        model_inputs = tokenizer([text], return_tensors="pt").to(device)
        generated_ids = model.generate(
            model_inputs.input_ids,
            max_new_tokens=512
        )
        # Strip the prompt tokens so only the newly generated continuation
        # is decoded.
        generated_ids = [
            output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]

        response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        return response

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"max_token": self.max_token,
                "temperature": self.temperature,
                "top_p": self.top_p,
                "history_len": self.history_len}


def find_txt_files_in_dir(directory, extension='.txt'):
    """Return full paths of files in *directory* whose names end with *extension*.

    Only the top level of *directory* is scanned (subdirectories are not
    traversed). Order follows ``os.listdir`` and is therefore arbitrary.

    Args:
        directory: directory to scan.
        extension: filename suffix to match; defaults to '.txt' so existing
            callers are unaffected, but the helper now works for any type.

    Returns:
        list[str]: matching file paths joined onto *directory*.
    """
    return [os.path.join(directory, name)
            for name in os.listdir(directory)
            if name.endswith(extension)]


def load_file(filepath):
    """Load *filepath* and split it into langchain documents.

    Splits on characters with 400-char chunks and 40-char overlap.
    NOTE(review): an earlier comment said larger chunks caused GPU
    out-of-memory downstream — keep chunk_size modest.
    """
    splitter = CharacterTextSplitter(chunk_size=400, chunk_overlap=40)
    loader = TextLoader(filepath, autodetect_encoding=True)
    return loader.load_and_split(splitter)


print("开始加载知识库")
load_time = datetime.now()

# Collect every .txt knowledge file and split each into documents.
docs = []
file_list = find_txt_files_in_dir("/home/zhengzhenzhuang/models/qwen/document/")
for file in file_list:
    print(file)
    docs.extend(load_file(file))

EMBEDDING_MODEL = 'bge-large-zh-v1.5'
embedding_model_dict = {
    "bge-large-zh-v1.5": "/home/zhengzhenzhuang/models/qwen/bge-large-zh-v1.5",
}
embeddings = HuggingFaceEmbeddings(model_name=embedding_model_dict[EMBEDDING_MODEL],
                                   model_kwargs={'device': EMBEDDING_DEVICE})
# Build the FAISS vector store from the documents and persist it to disk.
db = FAISS.from_documents(docs, embeddings)
db.save_local("/home/zhengzhenzhuang/models/qwen/document/db")
'''
如果已经创建好了，可以直接读取
db = FAISS.load_local("/home/zhengzhenzhuang/models/qwen/document/db", embeddings)
'''
# Prompt that restricts answers to the retrieved context only.
prompt_template = """我将给你一个知识文本context,以及一个与你的工作有关的问题question.
             如果你在context中无法搜寻到问题的答案,即使你本身知道答案但我也请你不要回答,只需要告诉我你不知道答案就行.
             知识文本为:{context},
             问题为:{question}
             """
PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)

# BUG FIX: operands were reversed ((load_time - now)), which always reported
# a negative elapsed time.
second = (datetime.now() - load_time).total_seconds()
print(f"加载知识库耗时{second}")


def q_a(question):
    """Retrieve knowledge-base chunks relevant to *question* and print them.

    Embeds the question, fetches the top-5 nearest documents from the
    module-level FAISS store ``db``, then selects either the single first
    hit (when it is very long) or as many hits as fit under a ~10000
    character budget. The QA-chain step that would produce an actual
    answer is currently disabled (see the dead string literal at the end),
    so this function only prints the selected context.
    """
    embedding_vector = embeddings.embed_query(question)
    docs = db.similarity_search_by_vector(embedding_vector, k=5)
    n = 0  # running character total of accepted chunks
    doc_list = []

    # A single oversized top hit (>5000 chars) is used alone; otherwise
    # accumulate hits while the running total stays under 10000 chars.
    if len(docs[0].page_content) > 5000:
        doc_list.append(docs[0])
    else:
        for doc in docs:
            n = n + len(doc.page_content)
            print(n)
            if n < 10000:
                print(doc.page_content)
                doc_list.append(doc)
            else:
                break

    # Dump the selected context chunks for manual inspection.
    for doc in doc_list:
        print(f"==================={len(doc.page_content)}======================")
        print(doc.page_content)


    # NOTE(review): the block below is dead code — a bare string literal,
    # apparently a disabled draft of the QA-chain answering step. Left
    # byte-identical; consider deleting or re-enabling it.
    """
    chain = load_qa_chain(Qwen(), chain_type="stuff", prompt=PROMPT)
    output = chain({"input_documents": doc_list, "question": question}, return_only_outputs=True)
    print(output)
    answer = output['output_text']
    match = re.search(r'\n[a-zA-Z]', answer)
    # 如果找到了匹配项
    if match:
        index = match.start()
        print(answer[:index])
    else:
        print(answer)

    """


# Demo query executed at import time (script-style smoke test).
q_a("中海油公司英文名称是什么")
