import os
from datetime import datetime

import uvicorn
from fastapi import FastAPI
from langchain.document_loaders import TextLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
# from langchain_community.document_loaders import TextLoader
# from langchain_community.vectorstores import FAISS
from langchain.vectorstores import FAISS
from pydantic import BaseModel

app = FastAPI()

device = "auto"  # the device to load the model onto
# Device string handed to HuggingFaceEmbeddings below (CUDA GPU index 2).
EMBEDDING_DEVICE = "cuda:2"

# model = AutoModelForCausalLM.from_pretrained("/home/zhengzhenzhuang/models/qwen/Qwen2-7B",torch_dtype="auto",device_map="auto")


from openai import OpenAI

# OpenAI-compatible client pointed at a self-hosted inference server
# (the api_key is a placeholder token expected by that server, not a real key).
client = OpenAI(
    base_url="http://192.168.80.35:8000/v1",
    api_key="token-abc123",
)


def answer(prompt):
    """Send *prompt* to the hosted chat model and return the reply text.

    The reply is also printed to stdout before being returned.
    """
    response = client.chat.completions.create(
        model="/home/zhengzhenzhuang/liujian/model/Qwen2-7B-Instruction",
        messages=[{"role": "user", "content": prompt}],
    )
    reply = response.choices[0].message.content
    print(reply)
    return reply


def find_txt_files_in_dir(directory, extension='.txt'):
    """Return full paths of files in *directory* whose names end with *extension*.

    Only the top level of *directory* is scanned; subdirectories are not
    traversed. Results follow os.listdir order (platform-dependent).

    Args:
        directory: Directory to scan.
        extension: Filename suffix to match (default '.txt'); keyword with a
            default so existing callers are unaffected.

    Returns:
        List of paths built with os.path.join(directory, filename).
    """
    # Comprehension replaces the original filter-then-append loop.
    return [os.path.join(directory, name)
            for name in os.listdir(directory)
            if name.endswith(extension)]


def load_file(filepath):
    """Load one text file and split it into overlapping chunks for embedding.

    Encoding is auto-detected by the loader. Chunking uses a recursive
    character splitter (500-char chunks, 50-char overlap).
    """
    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    text_loader = TextLoader(filepath, autodetect_encoding=True)
    return text_loader.load_and_split(splitter)


# ---------------------------------------------------------------------------
# Build the knowledge base at import time: load every .txt document, embed it,
# and persist a FAISS index to disk.
# ---------------------------------------------------------------------------
print("开始加载知识库")
load_time = datetime.now()
docs = []
file_list = find_txt_files_in_dir("/home/zhengzhenzhuang/models/tender/document/")
for file in file_list:
    print(file)
    docs.extend(load_file(file))

EMBEDDING_MODEL = 'bge-large-zh-v1.5'
embedding_model_dict = {
    "bge-large-zh-v1.5": "/home/zhengzhenzhuang/models/qwen/bge-large-zh-v1.5",
}
embeddings = HuggingFaceEmbeddings(model_name=embedding_model_dict[EMBEDDING_MODEL],
                                   model_kwargs={'device': EMBEDDING_DEVICE})
# Build the vector store and persist it. If the index already exists it can be
# reloaded instead with:
#   db = FAISS.load_local("/home/zhengzhenzhuang/models/tender/document/db", embeddings)
db = FAISS.from_documents(docs, embeddings)
db.save_local("/home/zhengzhenzhuang/models/tender/document/db")

# BUG FIX: elapsed time was computed as (start - now), which is always
# negative; it must be (now - start).
second = (datetime.now() - load_time).total_seconds()
print(f"加载知识库耗时{second}")

# Load the reference document that q_a2 uses as its answering context,
# truncated to the first 20,000 characters to keep the prompt bounded.
with open('/home/zhengzhenzhuang/models/tender/document/08.txt', 'r', encoding='utf-8') as file:
    # Single read() replaces the original readlines()+string-concatenation
    # loop (which was quadratic) and yields the identical string.
    doc_content = file.read()

doc_content = doc_content[:20000]


def q_a2(question):
    """Answer *question* using only the pre-loaded document context.

    The prompt instructs the model to answer strictly from the module-level
    `doc_content` text and to say it does not know when the answer is not
    present there.

    Args:
        question: The user's question (plain text).

    Returns:
        The model's reply string (also printed to stdout).
    """
    # NOTE(review): the commented-out similarity_search calls suggest this was
    # meant to retrieve context from the FAISS `db`; currently the whole
    # truncated document is sent instead — confirm which behavior is intended.
    prompt_template = """我将给你一个知识文本context,以及一个与你的工作有关的问题question.
                 如果你在context中无法搜寻到问题的答案,即使你本身知道答案但我也请你不要回答,只需要告诉我你不知道答案就行.
                 知识文本为:{context},
                 问题为:{question}
                 """

    prompt_text = prompt_template.format(context=doc_content, question=question)

    # Removed dead code: an unused `text_content` variable, a debug print of a
    # hard-coded file path, and a large unreachable block after the return.
    result = answer(prompt_text)
    print(result)
    return result


class MyClassModel(BaseModel):
    """Request body schema for the /qwen2/api endpoint."""
    # The user's question, forwarded verbatim to q_a2.
    question: str


@app.get("/")
def read_root():
    """Health-check endpoint returning a static greeting."""
    return dict(Hello="World")


@app.get("/reload/file")
def reload_file():
    """Placeholder reload endpoint; always reports success without reloading."""
    return dict(answer="success")


@app.post("/qwen2/api")
async def create_myclass(myclass: MyClassModel):
    """Answer a posted question via q_a2 and report the elapsed time.

    Args:
        myclass: Request body containing the question string.

    Returns:
        JSON object of the form {"answer": "<model reply>"}.
    """
    t1 = datetime.now()
    question = myclass.question
    print(question)
    # Renamed from `answer` to avoid shadowing the module-level answer().
    result = q_a2(question)

    t2 = datetime.now()
    # BUG FIX: elapsed time was (t1 - t2), which is always negative.
    second = (t2 - t1).total_seconds()
    print(f"耗时{second}")
    return {"answer": f"{result}"}


if __name__ == "__main__":
    # Serve the API on all interfaces, port 8093.
    uvicorn.run(app, host="0.0.0.0", port=8093)

