import os
from datetime import datetime

import uvicorn
from fastapi import FastAPI
from langchain.document_loaders import TextLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
# from langchain_community.document_loaders import TextLoader
# from langchain_community.vectorstores import FAISS
from langchain.vectorstores import FAISS
from pydantic import BaseModel

app = FastAPI()

# Device the HuggingFace embedding model runs on ("cpu" here; no GPU used for embeddings).
EMBEDDING_DEVICE = "cpu"

# NOTE(review): mid-file import; conventionally belongs in the import block at the top.
from openai import OpenAI

# OpenAI-compatible client pointing at a locally hosted inference server
# (base_url/api_key are for an internal vLLM-style deployment).
client = OpenAI(
    base_url="http://192.168.80.35:8000/v1",
    api_key="token-abc123",
)

def answer(prompt):
    """Send *prompt* as a single user message to the chat model and return its reply text.

    Uses temperature=0 for deterministic output; no streaming.
    """
    response = client.chat.completions.create(
        model="/home/zhengzhenzhuang/liujian/model/Qwen2-72B-Instruct-GPTQ-Int8",
        messages=[{"role": "user", "content": prompt}],
        temperature=0,
    )
    return response.choices[0].message.content


def find_txt_files_in_dir(directory):
    """Return the full paths of all ``.txt`` files directly inside *directory*.

    Subdirectories are not traversed (matches the original behavior).

    :param directory: path of the directory to scan.
    :return: list of ``os.path.join(directory, filename)`` strings, in
        ``os.listdir`` order.
    """
    # Single comprehension replaces the filter-then-append loop of the original.
    return [
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if name.endswith('.txt')
    ]


def load_file(filepath):
    """Load a text file and split it into overlapping chunks.

    :param filepath: path to a UTF-8/auto-detected text file.
    :return: list of LangChain documents, chunked to 500 chars with 50 overlap.
    """
    # NOTE: original comment warned this splitter choice can exhaust GPU memory.
    splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    document_loader = TextLoader(filepath, autodetect_encoding=True)
    return document_loader.load_and_split(splitter)


def vectorSearch(filepath, question):
    """Build a FAISS index over the chunks of *filepath* and return the text
    of the 10 chunks most similar to *question*, concatenated.

    The index is rebuilt from scratch on every call and persisted to disk so a
    later run could load it with ``FAISS.load_local(...)`` instead.

    :param filepath: path of the .txt knowledge file to index.
    :param question: query string embedded and searched against the index.
    :return: concatenated ``page_content`` of the top-10 matching chunks.
    """
    print("向量搜索")
    print("开始加载知识库")
    load_time = datetime.now()
    docs = load_file(filepath)

    EMBEDDING_MODEL = 'bge-large-zh-v1.5'
    embedding_model_dict = {
        "bge-large-zh-v1.5": "/home/zhengzhenzhuang/models/qwen/bge-large-zh-v1.5",
    }
    embeddings = HuggingFaceEmbeddings(model_name=embedding_model_dict[EMBEDDING_MODEL],
                                       model_kwargs={'device': EMBEDDING_DEVICE})
    # Create the vector store from the freshly split documents.
    db = FAISS.from_documents(docs, embeddings)
    # Persist the index for potential reuse by a later run.
    db.save_local("/home/zhengzhenzhuang/models/tender/document/db")

    # Embed the question and fetch the 10 nearest chunks by vector similarity.
    embedding_vector = embeddings.embed_query(question)
    docs = db.similarity_search_by_vector(embedding_vector, k=10)

    # Log each chunk's length, then join once instead of quadratic += concatenation.
    for doc in docs:
        print(f"==================={len(doc.page_content)}======================")
    text_content = "".join(doc.page_content for doc in docs)

    # BUG FIX: operands were reversed (load_time - now), which always reported
    # a negative elapsed time.
    second = (datetime.now() - load_time).total_seconds()
    print(f"加载知识库耗时{second}")
    return text_content


def q_a2(question, chapter):
    """Answer *question* from the knowledge text stored for *chapter*.

    Reads ``/home/zhengzhenzhuang/models/tender/document/{chapter}.txt``; if
    the text exceeds 55,000 characters it is first narrowed to the most
    relevant chunks via vector search, then the LLM is prompted with the text
    plus the question.

    :param question: the user's question.
    :param chapter: basename (without extension) of the knowledge .txt file.
    :return: the model's answer string.
    :raises FileNotFoundError: if the chapter file does not exist.
    """
    # Removed a dead first prompt_template that was immediately overwritten,
    # and an unreachable string-literal block that followed the return.
    prompt_template = """我给你一个知识文本,以及一个相关的问题,请你根据知识文本的原文回答我的问题,原文的内容要全部输出来，不能够省略，不能总结。如果遇到表格的内容，按照每一点原文罗列出来。如果你无法搜寻到问题的答案,只需要告诉我你不知道答案就行.
                 问题为:{question},
                 知识文本为:{context}
                 """

    path = f'/home/zhengzhenzhuang/models/tender/document/{chapter}.txt'
    print(path)
    # Read the whole file at once instead of readlines() plus per-line += concat.
    with open(path, 'r', encoding='utf-8') as file:
        doc_content = file.read()
    print(f"原文本长度：{len(doc_content)}")

    # Very long documents would overflow the model context window; narrow them
    # down to the most relevant chunks with vector similarity search.
    if len(doc_content) > 55000:
        doc_content = vectorSearch(path, question)
        print(f"向量搜索文本长度：{len(doc_content)}")

    prompt_text = prompt_template.format(context=doc_content, question=question)

    result = answer(prompt_text)
    print(result)
    return result


class MyClassModel(BaseModel):
    # Request body for POST /qwen2/api.
    question: str  # the user's question text
    chapter: str  # basename of the knowledge .txt file to consult


@app.get("/")
def read_root():
    """Root endpoint; serves as a simple liveness check."""
    return dict(Hello="World")


@app.get("/reload/file")
def reload_file():
    """Placeholder reload endpoint; always reports success without doing work."""
    return dict(answer="success")


@app.post("/qwen2/api")
async def create_myclass(myclass: MyClassModel):
    """Answer a chapter-scoped question via the Q&A pipeline.

    :param myclass: request body with ``question`` and ``chapter``.
    :return: ``{"answer": <model reply>}``; elapsed time is logged.
    """
    start = datetime.now()
    question = myclass.question
    chapter = myclass.chapter
    print(f"问题：{question}")
    print(f"章节：{chapter}")

    # Renamed from `answer` to avoid shadowing the module-level answer() helper.
    result = q_a2(question, chapter)

    # BUG FIX: operands were reversed (t1 - t2), which always reported a
    # negative elapsed time.
    second = (datetime.now() - start).total_seconds()
    print(f"耗时{second}")
    return {"answer": f"{result}"}


if __name__ == "__main__":
    # Serve the API on all interfaces at port 8092 when run as a script.
    uvicorn.run(app, host="0.0.0.0", port=8092)
