import os
from datetime import datetime

import uvicorn
from fastapi import FastAPI
from langchain.document_loaders import TextLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from pydantic import BaseModel

# Device string passed to HuggingFaceEmbeddings; "npu" presumably targets an
# Ascend NPU build of torch — TODO confirm this is valid on the deployment host.
EMBEDDING_DEVICE = "npu"
app = FastAPI()

from openai import OpenAI

# OpenAI-compatible client pointed at a locally hosted inference server
# (NOTE(review): looks like a vLLM endpoint at 192.168.80.35:8000 — verify).
# The api_key is the static token that server was started with.
client = OpenAI(
    base_url="http://192.168.80.35:8000/v1",
    api_key="token-abc123",
)


def answer(prompt, model="/home/zhengzhenzhuang/liujian/model/Qwen2-72B-Instruct-GPTQ-Int8"):
    """Send *prompt* to the chat-completions endpoint and return the reply text.

    Args:
        prompt: Full user prompt (question plus any knowledge context).
        model: Served model path; defaults to the Qwen2-72B GPTQ-Int8
            deployment the original code hard-coded. Made a parameter so
            callers can switch models without editing this function.

    Returns:
        The assistant message content of the first completion choice.
    """
    completion = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "user", "content": prompt}
        ],
        # temperature=0 keeps extraction-style answers deterministic.
        temperature=0,
    )
    return completion.choices[0].message.content


def find_txt_files_in_dir(directory):
    """Return full paths of the ``.txt`` entries directly inside *directory*.

    Subdirectories are not traversed. Matching is by the literal ".txt"
    suffix (case-sensitive); ``os.listdir`` ordering is preserved.

    Args:
        directory: Path of the directory to scan.

    Returns:
        List of ``os.path.join(directory, name)`` paths for each .txt entry.
    """
    # Single comprehension replaces the original listdir + filter + append loop.
    return [
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if name.endswith('.txt')
    ]


def load_file(filepath):
    """Load a text file and split it into chunks ready for embedding.

    Args:
        filepath: Path to the source text file; its encoding is
            auto-detected by the loader.

    Returns:
        The list of document chunks produced by the splitter.
    """
    # ~1000-char chunks with 50-char overlap. The original author noted that
    # this chunk size can exhaust GPU memory during embedding.
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
    loader = TextLoader(filepath, autodetect_encoding=True)
    return loader.load_and_split(splitter)


def vectorSearch(filepath, question):
    """Retrieve the 20 most relevant chunks of *filepath* for *question*.

    Splits the file, embeds its chunks with a local BGE model into a FAISS
    index (persisted to disk on every call), embeds the question, and returns
    the concatenated page content of the 20 nearest chunks.

    Args:
        filepath: Path of the knowledge text file to index.
        question: Query text to embed and search with.

    Returns:
        All matching chunk texts concatenated into one string.
    """
    print("向量搜索")
    print("开始加载知识库")
    load_time = datetime.now()
    docs = load_file(filepath)

    # Local Chinese BGE embedding model; EMBEDDING_DEVICE comes from module level.
    EMBEDDING_MODEL = 'bge-large-zh-v1.5'
    embedding_model_dict = {
        "bge-large-zh-v1.5": "/home/zhengzhenzhuang/models/qwen/bge-large-zh-v1.5",
    }
    embeddings = HuggingFaceEmbeddings(model_name=embedding_model_dict[EMBEDDING_MODEL],
                                       model_kwargs={'device': EMBEDDING_DEVICE})
    # Build the vector store from scratch on every call and persist it.
    # If the index is already built it could instead be loaded with:
    #   db = FAISS.load_local("/home/zhengzhenzhuang/models/tender/document/db", embeddings)
    db = FAISS.from_documents(docs, embeddings)
    db.save_local("/home/zhengzhenzhuang/models/tender/document/db")

    # Search by the embedded question vector.
    embedding_vector = embeddings.embed_query(question)
    hits = db.similarity_search_by_vector(embedding_vector, k=20)

    for doc in hits:
        print(f"==================={len(doc.page_content)}======================")
    # join() instead of repeated += avoids quadratic string building.
    text_content = "".join(doc.page_content for doc in hits)

    # BUG FIX: the original computed load_time - datetime.now(), which yields
    # a NEGATIVE duration; elapsed time is now() - start.
    second = (datetime.now() - load_time).total_seconds()
    print(f"加载知识库耗时{second}")
    return text_content


def q_a2(question, filepath, type):
    """Answer *question* from the numbered ``.txt`` documents under *filepath*.

    Each qualifying file (name of at most 6 characters with a numeric prefix
    below 18) is sent to the LLM individually; oversized files are first
    reduced via vector search. Per-file answers are written next to the
    source files, then combined and summarized into one final answer.

    Args:
        question: The user question (original wording; a "请用原文回答"
            prefix is added internally).
        filepath: Directory containing the numbered .txt documents.
        type: 1 for 招标 documents, otherwise 投标 (per the original code's
            comment). NOTE: shadows the builtin ``type``; the parameter name
            is kept for caller compatibility.

    Returns:
        The aggregated answer, or "无相关内容" when nothing relevant was found.
    """
    question = "请用原文回答" + question
    prompt_template = """我给你一个知识文本,以及一个相关的问题,请你根据知识文本的原文回答我的问题,原文的内容要全部输出来，不能够省略，不能总结。如果遇到表格的内容，按照每一点原文罗列出来。如果你无法搜寻到问题的答案,只需要告诉我你不知道答案就行.
                 问题为:{question},
                 知识文本为:{context}
                 """

    file_list = find_txt_files_in_dir(filepath)
    found_answers = []   # answers where the model found something
    tagged_answers = []  # every answer, tagged with its source path
    for path in file_list:
        file_name = os.path.basename(path)
        if len(file_name) > 6:
            continue
        prefix, ext = os.path.splitext(file_name)
        # Files numbered 18 and above are skipped regardless of type — the
        # original 招标/投标 branches were byte-identical, so they are merged.
        try:
            if int(prefix) >= 18:
                continue
        except ValueError:
            # Non-numeric file name: skip instead of crashing on int().
            continue
        print(path)
        with open(path, 'r', encoding='utf-8') as file:
            # read() replaces the original readlines() + concat loop.
            file_content = file.read()
        print(f"原文本长度：{len(file_content)}")
        if len(file_content.replace(" ", "")) > 30000:
            print("去除空格符长度超过30000，采用向量搜索")
            file_content = vectorSearch(path, question)

        prompt_text = prompt_template.format(context=file_content, question=question)
        output = answer(prompt_text)
        # Answers containing "无法" mean the model could not find an answer;
        # they are excluded from the summary but kept in the "-All" dump.
        if "无法" in output:
            print("")
        else:
            found_answers.append(output)
        tagged_answers.append("\n\n" + path + "\n" + output)

    # join() reproduces the original "\n"-prefixed concatenation without
    # quadratic string building.
    doc_content = "".join("\n" + a for a in found_answers)
    doc_content_all = "".join(tagged_answers)

    # Persist intermediate answers; `with` guarantees the handles close even
    # if a write fails (original used bare open/close).
    w_path = filepath + "/" + question + ".txt"
    w_path_all = filepath + "/" + question + "-All.txt"
    with open(w_path, "w", encoding="UTF-8") as wf:
        wf.write(doc_content)
    with open(w_path_all, "w", encoding="UTF-8") as wf:
        wf.write(doc_content_all)

    if len(doc_content) > 130000:
        print("============文件超过130000,截取前130000==============")
        doc_content = doc_content[:130000]

    prompt_text = prompt_template.format(context=doc_content, question=question)
    result = "无相关内容"
    if len(doc_content) > 10:
        result = answer(prompt_text)
    print(result)
    return result


class MyClassModel(BaseModel):
    """Request body for the /tender endpoints."""

    question: str   # the user question to answer
    file_path: str  # directory holding the numbered .txt knowledge files
    type: int       # 1 = 招标, 2 = 投标 (per the comment in q_a2)


@app.post("/tender")
def create_myclass(myclass: MyClassModel):
    """Answer a question against documents of the request's type (1=招标, else 投标)."""
    t1 = datetime.now()
    question = myclass.question
    file_path = myclass.file_path
    type = myclass.type
    print(f"问题：{question}")
    print(f"类型：{type}")

    # Local renamed from `answer` — the original shadowed the module-level
    # answer() function inside this handler.
    result = q_a2(question, file_path, type)

    t2 = datetime.now()
    # BUG FIX: the original computed t1 - t2, printing a negative elapsed time.
    second = (t2 - t1).total_seconds()
    print(f"耗时{second}")
    return result


@app.post("/invite/tender")
def create_myclass(myclass: MyClassModel):
    """Answer a question against the invitation (招标) document set."""
    t1 = datetime.now()
    question = myclass.question
    file_path = myclass.file_path
    print(f"问题：{question}")

    # BUG FIX: the original passed the BUILTIN `type` (a class object) to
    # q_a2 instead of the request's type field.
    result = q_a2(question, file_path, myclass.type)

    t2 = datetime.now()
    # BUG FIX: the original computed t1 - t2, printing a negative elapsed time.
    second = (t2 - t1).total_seconds()
    print(f"耗时{second}")
    return result


@app.post("/submit/tender")
def create_myclass(myclass: MyClassModel):
    """Answer a question against the submission (投标) document set."""
    t1 = datetime.now()
    question = myclass.question
    file_path = myclass.file_path
    print(f"问题：{question}")

    # BUG FIX: the original passed the BUILTIN `type` (a class object) to
    # q_a2 instead of the request's type field.
    result = q_a2(question, file_path, myclass.type)

    t2 = datetime.now()
    # BUG FIX: the original computed t1 - t2, printing a negative elapsed time.
    second = (t2 - t1).total_seconds()
    print(f"耗时{second}")
    return result


if __name__ == "__main__":
    # Serve the API on all interfaces; port 19309 is this deployment's fixed port.
    uvicorn.run(app, host="0.0.0.0", port=19309)
