import logging
import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.document_loaders import TextLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
import multiprocessing
import gc
import torch_npu

# Configure logging
logging.basicConfig(level=logging.INFO)
# Global lock guarding vector-store construction/search across worker processes
docsearch_lock = multiprocessing.Lock()
embedding_path = '/home/linweibin/liujian/model/zpoint_large_embedding_zh'


def create_docsearch_index(file_path, embedding_path):
    """Build a FAISS vector index from a text file.

    The file is split on the ``#####`` separator, embedded with a local
    HuggingFace model, and loaded into a FAISS store.

    Args:
        file_path: Path to the source text file.
        embedding_path: Local path of the HuggingFace embedding model.

    Returns:
        The FAISS vector store built from the document chunks.

    Raises:
        Re-raises any exception after logging it.
    """
    try:
        # Load the raw document from disk.
        documents = TextLoader(file_path).load()

        # chunk_size/overlap of 0: splitting is presumably driven purely
        # by the "#####" separator — confirm against the input format.
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=0, chunk_overlap=0, separators=["#####"]
        )
        chunks = splitter.split_documents(documents)

        # Embedding model is loaded from a local path on every call.
        embedder = HuggingFaceEmbeddings(model_name=embedding_path)

        # Serialize index construction across processes.
        with docsearch_lock:
            index = FAISS.from_documents(chunks, embedder)
        return index

    except Exception as exc:
        logging.error(f"Error in create_docsearch_index: {exc}")
        raise

    finally:
        # Always release the NPU cache and collect garbage, success or failure.
        torch_npu.npu.empty_cache()
        gc.collect()


def query_docsearch(docsearch, query):
    """Run a similarity search and concatenate the matched chunks.

    Args:
        docsearch: A vector store exposing ``similarity_search``.
        query: The user's query string.

    Returns:
        The ``page_content`` of the top 30 matches joined into one string.

    Raises:
        Re-raises any exception after logging it.
    """
    try:
        with docsearch_lock:
            results = docsearch.similarity_search(query, k=30)
            # Join once instead of `+=` in a loop (quadratic growth), and
            # drop the former per-iteration debug print of the partial string.
            # If a separator between fragments is wanted, use e.g.
            # "\n---\n".join(...) here instead.
            concatenated_content = "".join(r.page_content for r in results)
        return concatenated_content
    except Exception as e:
        logging.error(f"Error in query_docsearch: {e}")
        raise


def prom(question, content):
    """Render the LLM prompt from the user question and retrieved fragments.

    Args:
        question: The user's question.
        content: The concatenated knowledge-text fragments.

    Returns:
        The fully rendered prompt string.

    Raises:
        Re-raises any exception after logging it.
    """
    try:
        template = (
            """我给你30个知识文本片段，片段用#####分隔开，还有一个相关的问题,请你根据片段中的原文回答我的问题,不能够省略，不能总结。如果遇到表格的内容，按照每一点原文罗列出来。如果你无法在知识文本片段中搜寻到问题的答案,只需要告诉我知识文本片段中无相关信息.\n问题：{question}\n知识文本片段：\n{context}        """)
        return template.format(context=content, question=question)
    except Exception as e:
        logging.error(f"Error in prom: {e}")
        raise


from openai import OpenAI


def chat(prompt):
    """Send the prompt to the locally hosted Qwen model and return its reply.

    Args:
        prompt: The fully rendered prompt text.

    Returns:
        The assistant message content of the first completion choice.
    """
    # OpenAI-compatible endpoint served on the local network.
    client = OpenAI(
        base_url="http://192.168.80.35:8000/v1",
        api_key="token-abc123",
    )
    response = client.chat.completions.create(
        model="/home/zhengzhenzhuang/liujian/model/Qwen2.5-72B-Instruct-GPTQ-Int8",
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content


file = "init"
# FastAPI application instance
app = FastAPI()


# Data model for the incoming request body
class Query(BaseModel):
    """Request body for the /tender endpoint."""

    # The user's question text.
    content: str
    # Path of the knowledge file; a change of path triggers re-indexing.
    file_path: str


@app.post("/tender")
def tender_bidder(query: Query):
    """Answer a question against the document at ``query.file_path``.

    Rebuilds the FAISS index only when the file path changes since the
    previous request. Returns the model's answer and the retrieved
    fragments, or an ``error`` key on failure.
    """
    global file, docsearch
    logging.info(f"Received query: {query}")
    try:
        if query.file_path != file:
            logging.info(f"Indexing new file: {query.file_path}")
            docsearch = create_docsearch_index(query.file_path, embedding_path)
            # Record the path only AFTER the index builds successfully.
            # The original assigned `file` first, so a failed build left a
            # stale/undefined `docsearch` that later requests for the same
            # path would never rebuild.
            file = query.file_path
        results = query_docsearch(docsearch, query.content)
        prompt_text = prom(query.content, results)
        tender = chat(prompt_text)
        logging.info(f"Retrieved fragment length: {len(results)}")
        return {"result": tender, "fragment": results}
    except Exception as e:
        logging.error(f"An error occurred: {e}")
        return {"error": str(e)}
    finally:
        # Free NPU memory after every request, success or failure.
        torch_npu.npu.empty_cache()
        gc.collect()
        logging.info("Cache cleared and garbage collection completed")


if __name__ == "__main__":
    # Serve the API on all interfaces at port 19298.
    uvicorn.run(app, host="0.0.0.0", port=19298)
