#!/usr/bin/env python
# coding: utf-8

import json
import threading
import time
import warnings
from concurrent.futures import ThreadPoolExecutor, as_completed
from itertools import count
from queue import Queue
from typing import Any, Dict

from langchain import PromptTemplate
from langchain.chains import RetrievalQA

from bm25_retriever import BM25
from faiss_retriever import FaissRetriever
from pdf_parse import DataProcess
from rerank_model import ReRankLLM
from utils.utils import get_path, logger, timer, torch_gc
from vllm_model import LLMClient

warnings.filterwarnings("ignore")
# Global LLM handle: initialised in main() via get_llm_instance() and used by
# the llm_worker thread.
llm = None

# Model checkpoint to load; the smaller 0.5B variant can be swapped in for testing.
llm_name = "Qwen/Qwen2.5-7B-Instruct"
# llm_name = "Qwen/Qwen2.5-0.5B-Instruct"


def get_llm_instance(model_path=llm_name):
    """Load and return an LLMClient for *model_path*.

    Any failure is logged via the module logger and re-raised to the caller.
    """
    try:
        print("Loading LLM...")
        client = LLMClient(model_path)
        print("LLM loaded.")
    except Exception as e:
        logger.error(f"Failed to load LLM: {e}")
        raise
    return client


# Queue used to hand inference tasks to the LLM worker thread.
inference_queue = Queue()

# Completed inference results, keyed by task id; value is the list of responses.
inference_results = {}
inference_lock = threading.Lock()
# Set by the LLM worker thread when it has exited (normally or via exception).
llm_worker_done_event = threading.Event()
# Monotonic task-id source, guarded by its own lock for thread safety.
_task_id_generator = count()
_task_id_lock = threading.Lock()


def get_next_task_id():
    """Return a process-unique, monotonically increasing task id (thread-safe)."""
    with _task_id_lock:
        task_id = next(_task_id_generator)
    return task_id


def llm_worker():
    """Background worker: consume (task_id, prompts) batches from
    ``inference_queue``, run them through the global ``llm`` and publish the
    responses into ``inference_results`` under ``inference_lock``.

    A ``(None, None)`` item is the shutdown sentinel.  On every exit path
    ``llm_worker_done_event`` is set so the main thread can observe shutdown.
    """
    global llm
    print(f"[LLM Worker] Worker started on thread: {threading.current_thread().name}")
    try:
        while True:
            task_id, batch_input = inference_queue.get()
            if batch_input is None:
                print("[LLM Worker] Received shutdown signal.")
                # BUGFIX: mark the sentinel as done too; otherwise
                # inference_queue.unfinished_tasks never reaches 0 and any
                # queue.join() would hang forever.
                inference_queue.task_done()
                break

            print(
                f"[LLM Worker] Processing task {task_id} with {len(batch_input)} prompts"
            )
            for i, prompt in enumerate(batch_input):
                print(f"  Prompt {i+1} (first 200 chars):\n  {prompt[:200]}...")

            try:
                result = llm.infer(batch_input)
                torch_gc()
                print(f"[LLM Worker] Inference completed for task {task_id}. Results:")
                for i, res in enumerate(result):
                    print(f"  Response {i+1}:\n  {res}")

                with inference_lock:
                    inference_results[task_id] = result
                print(f"[LLM Worker] Task {task_id} result stored.")

            except Exception as e:
                # Per-task failure: store one error placeholder per prompt so
                # the waiting caller is unblocked with the expected batch size.
                logger.error(f"LLM inference error: {e}")
                with inference_lock:
                    inference_results[task_id] = ["模型推理错误"] * len(batch_input)
                print(f"[LLM Worker] Task {task_id} failed, stored error responses.")
            finally:
                inference_queue.task_done()

    finally:
        # Whether we exited normally or via an exception, signal the main
        # thread that the worker is gone.
        llm_worker_done_event.set()
        print("[LLM Worker] Exited.")


# Build the LangChain QA tool chain.
@timer
def get_qa_chain(llm, vector_store, prompt_template):
    """Wrap *llm* and a top-10 retriever over *vector_store* into a RetrievalQA chain."""
    qa_prompt = PromptTemplate(
        input_variables=["context", "question"],
        template=prompt_template,
    )
    retriever = vector_store.as_retriever(search_kwargs={"k": 10})
    return RetrievalQA.from_llm(llm=llm, retriever=retriever, prompt=qa_prompt)


# Build the merged prompt from the FAISS and BM25 recall results.
def get_emb_bm25_merge(faiss_context, bm25_context, query):
    """Concatenate up to 6 FAISS docs and up to 6 BM25 docs (each branch capped
    at 2500 characters) into the dual-context QA prompt for *query*.

    faiss_context: iterable of (Document, score) pairs.
    bm25_context: iterable of Documents.
    """
    max_length = 2500

    emb_ans = ""
    for i, (doc, score) in enumerate(faiss_context):
        # Cap at 6 documents and max_length characters of context.
        if i >= 6 or len(emb_ans + doc.page_content) > max_length:
            break
        emb_ans += doc.page_content

    bm25_ans = ""
    for i, doc in enumerate(bm25_context):
        # BUGFIX: check the document cap *before* appending so BM25 also gets
        # at most 6 docs; the old loop appended a 7th doc before breaking,
        # inconsistent with the FAISS branch and process_single_question.
        if i >= 6 or len(bm25_ans + doc.page_content) > max_length:
            break
        bm25_ans += doc.page_content

    prompt_template = """
        基于以下已知信息，简洁和专业的来回答用户的问题。
        如果无法从中得到答案，请说 "无答案"或"无答案"，不允许在答案中添加编造成分，答案请使用中文。
        已知内容为吉利控股集团汽车销售有限公司的吉利用户手册:
        1: {emb_ans}
        2: {bm25_ans}
        问题:
        {question}
    """.format(
        emb_ans=emb_ans, bm25_ans=bm25_ans, question=query
    )
    return prompt_template


def get_rerank_prompt(emb_ans, query):
    """Build the single-context QA prompt: *emb_ans* as the known information
    block and *query* as the user question."""
    prompt = f"""
        基于以下已知信息，简洁和专业的来回答用户的问题。
        如果无法从中得到答案，请说 "无答案"或"无答案" ，不允许在答案中添加编造成分，答案请使用中文。
        已知内容为吉利控股集团汽车销售有限公司的吉利用户手册:
        1: {emb_ans}
        问题:
        {query}
    """
    return prompt


@timer
def re_rank(rerank, top_k, query, bm25_ans, faiss_ans):
    """Rerank the union of FAISS and BM25 candidates for *query* and return
    the concatenated page_content of the top_k docs, capped at 4000 chars."""
    max_length = 4000
    candidates = [doc for doc, _ in faiss_ans]
    candidates.extend(bm25_ans)
    top_docs = rerank.predict(query, candidates)[:top_k]
    merged = ""
    for doc in top_docs:
        if len(merged + doc.page_content) > max_length:
            break
        merged += doc.page_content
    return merged


def process_single_question(
    item: Dict[str, Any], faiss_retriever, bm25_retriever, rerank_model
) -> Dict[str, Any]:
    """Answer one question via four strategies (merged, BM25-only, FAISS-only,
    reranked), dispatching all four prompts as one batch to the LLM worker.

    Returns a copy of *item* with answer_1..answer_7 filled in.
    """
    query = item["question"]
    max_length = 4000

    # FAISS retrieval; assumes get_topk returns (doc, score) pairs sorted by
    # ascending distance, so the first score is the best match.
    faiss_context = faiss_retriever.get_topk(query, 15)
    faiss_min_score = faiss_context[0][1] if faiss_context else float("inf")

    emb_ans = ""
    for i, (doc, score) in enumerate(faiss_context):
        if i >= 6 or len(emb_ans + doc.page_content) > max_length:
            break
        emb_ans += doc.page_content

    # BM25 retrieval
    bm25_context = bm25_retriever.get_bm25_topk(query, 15)
    bm25_ans = ""
    for i, doc in enumerate(bm25_context):
        if i >= 6 or len(bm25_ans + doc.page_content) > max_length:
            break
        bm25_ans += doc.page_content

    # Multi-route fusion prompts
    merge_prompt = get_emb_bm25_merge(faiss_context, bm25_context, query)
    bm25_prompt = get_rerank_prompt(bm25_ans, query)
    emb_prompt = get_rerank_prompt(emb_ans, query)

    # Rerank fusion
    rerank_content = re_rank(rerank_model, 6, query, bm25_context, faiss_context)
    rerank_prompt = get_rerank_prompt(rerank_content, query)

    batch_input = [merge_prompt, bm25_prompt, emb_prompt, rerank_prompt]

    # --- Hand the batch to the LLM worker ---
    task_id = get_next_task_id()
    inference_queue.put((task_id, batch_input))

    # --- Wait for the LLM worker to publish the result ---
    batch_output = None
    while batch_output is None:
        with inference_lock:
            if task_id in inference_results:
                batch_output = inference_results.pop(task_id)
                break
        # BUGFIX: if the worker thread has exited (llm_worker sets this event
        # on every exit path) our task will never be answered — without this
        # check the old loop spun forever and the thread pool deadlocked.
        if llm_worker_done_event.is_set():
            with inference_lock:
                batch_output = inference_results.pop(
                    task_id, ["模型推理错误"] * len(batch_input)
                )
            break
        time.sleep(0.01)  # avoid busy-waiting

    # Assemble the result record
    result_item = item.copy()
    result_item["answer_1"] = batch_output[0]  # merge
    result_item["answer_2"] = batch_output[1]  # bm25
    result_item["answer_3"] = batch_output[2]  # emb
    result_item["answer_4"] = batch_output[3]  # rerank
    # A distance above 500 is treated as "no relevant document found".
    result_item["answer_5"] = (
        "无答案" if faiss_min_score > 500 else str(faiss_min_score)
    )
    result_item["answer_6"] = bm25_ans
    result_item["answer_7"] = rerank_content

    return result_item


def main():
    """End-to-end pipeline: parse the PDF, build FAISS/BM25 indexes, start the
    LLM worker thread, answer all test questions concurrently, then shut the
    worker down and dump the results to data/result.json."""
    start = time.time()
    m3e_model = "moka-ai/m3e-large"
    reranker_model = "BAAI/bge-reranker-large"
    print("Loading PDF and processing data...")
    pdf_path = get_path("data/train_a.pdf")
    data_process = DataProcess(pdf_path)
    # Multiple parsing passes at different chunk sizes accumulate into
    # data_process.data; the logger calls track the growing corpus size.
    data_process.parse_blocks(max_seq=1024)
    data_process.parse_blocks(max_seq=512)
    logger.info(len(data_process.data))
    data_process.parse_all_page(max_seq=256)
    data_process.parse_all_page(max_seq=512)
    logger.info(len(data_process.data))
    data_process.parse_one_page_with_rule(max_seq=256)
    data_process.parse_one_page_with_rule(max_seq=512)
    logger.info(len(data_process.data))
    data = data_process.data
    print("Data load complete.")
    print("Building Faiss index...")
    faiss_retriever = FaissRetriever(m3e_model, data)
    print("Faiss index built.")

    print("Building BM25 index...")
    bm25_retriever = BM25(data)
    print("BM25 index built.")

    # The LLM must be assigned to the module global before the worker starts,
    # since llm_worker reads it on the first dequeued task.
    global llm
    llm = get_llm_instance()
    print("Starting LLM worker thread...")
    worker_thread = threading.Thread(target=llm_worker, daemon=True)
    worker_thread.start()
    print(f"LLM worker started on thread: {worker_thread.name}")

    print("Loading Rerank model...")
    rerank_model = ReRankLLM(reranker_model)
    print("Rerank model loaded.")
    test_question = get_path("data/test_question.json")
    with open(test_question, "r", encoding="utf-8") as f:
        questions = json.load(f)
    print(f"Loaded {len(questions)} questions.")
    results = []
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [
            executor.submit(
                process_single_question,
                question,
                faiss_retriever,
                bm25_retriever,
                rerank_model,
            )
            for question in questions
        ]

        # Collect in completion order; result.json is therefore unordered.
        for future in as_completed(futures):
            try:
                result = future.result()
                results.append(result)
            except Exception as e:
                print(f"Error processing question: {e}")

    # Send the shutdown sentinel to the LLM worker.
    inference_queue.put((None, None))

    # Wait for the LLM worker to exit (at most 120 seconds).
    if llm_worker_done_event.wait(timeout=120):
        print("[Main] LLM worker exited gracefully.")
    else:
        print("[Main] Warning: LLM worker did not exit within 120 seconds.")

    # Final sanity check that the task queue drained (optional).
    if inference_queue.unfinished_tasks > 0:
        print(f"[Main] Warning: {inference_queue.unfinished_tasks} tasks unfinished.")

    result_json = get_path("data/result.json")
    with open(result_json, "w", encoding="utf-8") as f:
        json.dump(results, f, ensure_ascii=False, indent=4)

    end = time.time()
    print(f"Total time: {end - start:.2f} seconds ({(end - start) / 60:.2f} minutes)")


if __name__ == "__main__":
    main()
    # Background invocation example:
    # # nohup python run.py > run.log 2>&1 &
