import json
import random
import sys
import time

from tqdm import tqdm
from dalchemy.data import TextHelper
from dalchemy.llms import AzureLLM, OpenAILLM, ZhipuLLM


# Shared text/JSON I/O helper: used below to read the input JSON and to
# append result lines to the output file.
helper = TextHelper()

# Constants: prompt templates (Chinese, kept verbatim — they are sent to the
# model) and the default passage range processed by main().
PROMPT_Q = "根据上述文本中与医疗领域相关的内容与逻辑关系提出几个中文问题,注意,提出的问题应该提供充实的内容,使问题具有挑战性。"
PROMPT_A_PREFIX = "请回答如下1个问题,注意生成的答案应该条理清晰,包含充实的内容,包括你自身的知识以及段落信息:"
START = 0
END = -1


def get_history(passage, passage2q_list):
    """Build the two-turn chat history for *passage*.

    Turn 1 (user): the passage text followed by the question-generation
    prompt. Turn 2 (assistant): the passage's known questions from
    *passage2q_list*, numbered "1. ..." one per line.
    """
    numbered_questions = "\n".join(
        f"{idx}. {question}"
        for idx, question in enumerate(passage2q_list[passage], start=1)
    )
    return [
        {"role": "user", "content": f"{passage}\n\n{PROMPT_Q}"},
        {"role": "assistant", "content": numbered_questions},
    ]


def generate_prompt_batches(passage2q_list, start=0, end=-1):
    """Yield one batch of encoded answer prompts per passage.

    Each prompt in a batch is the passage's question-generation history
    plus a final user turn asking the model to answer one question.

    Args:
        passage2q_list: mapping of passage text -> list of questions.
        start: index of the first passage to process (inclusive).
        end: index of the first passage to stop at (exclusive); -1 means
            process through the last passage.

    Yields:
        list[str]: one encoded chat prompt per question of the passage.
    """
    if end == -1:
        end = len(passage2q_list)

    for pid, (passage, q_list) in enumerate(passage2q_list.items()):
        if pid < start:
            continue
        if pid >= end:
            break

        # The base history is identical for every question of this passage,
        # so build it once here instead of once per question.
        base_history = get_history(passage, passage2q_list)

        prompt_batch = []
        for q in q_list:
            # Copy the base history and add the per-question answer request.
            history = base_history + [
                {"role": "user", "content": f"{PROMPT_A_PREFIX} {q}"}
            ]
            prompt_batch.append(AzureLLM.encode_chat_history(history))

        yield prompt_batch


def main():
    """Generate answers for question records and append them as JSON lines.

    Reads ``sys.argv[1]`` (JSON list of records with "query" and "pid"
    fields), groups questions by passage id, batch-queries the module-level
    ``llm`` for answers, and appends one JSON record per QA pair to
    ``sys.argv[2]``.
    """
    # NOTE: `llm` is only read here, so no `global` declaration is needed;
    # it is created in the `__main__` block before main() runs.
    infile = sys.argv[1]
    outfile = sys.argv[2]
    data_ls = helper.read_json(infile)

    def _text_postprocess(text):
        # Strip double quotes and surrounding whitespace from model text.
        return text.replace("\"", "").strip()

    # Build passage-id -> ordered, de-duplicated question list.
    passage2q_list = {}
    for data in tqdm(data_ls):
        question = data["query"].strip()
        passage = str(data["pid"])
        q_ls = passage2q_list.setdefault(passage, [])
        if question not in q_ls:
            q_ls.append(question)

    qid = 0
    for prompt_batch in generate_prompt_batches(passage2q_list, start=START, end=END):
        # Decode each prompt exactly once: the last turn carries the question
        # (after the answer-prefix), the first turn carries the passage (plus
        # the question-generation instruction, which we strip back off).
        chat_histories = [AzureLLM.prompt_to_chatml(prompt) for prompt in prompt_batch]
        q_list = [h[-1]["content"].replace(PROMPT_A_PREFIX, "") for h in chat_histories]
        passage_list = [h[0]["content"].replace(PROMPT_Q, "").strip() for h in chat_histories]

        # Call the API with one worker per prompt in the batch.
        step_results = llm.generate(prompt_batch, is_chat=True, num_procs=len(prompt_batch))

        # Assemble the QA records for this batch.
        res = []
        for q, a, p in zip(q_list, step_results, passage_list):
            record = {
                "qid": str(qid),
                "query": _text_postprocess(q),
                "answer": _text_postprocess(a),
                "pid": str(p),
            }
            res.append(json.dumps(record, ensure_ascii=False))
            qid += 1

        # Persist this batch, then throttle before the next API call.
        helper.write_lines_append(res, outfile, sep="\n")
        time.sleep(10)


if __name__ == '__main__':
    # Alternative backend: Azure OpenAI. To use it, uncomment the block
    # below (filling in real endpoint/key values) and comment out the
    # Zhipu configuration instead.
    # azure_cfg = {
    #     "engine": "gpt-35-turbo-16k",
    #     "api_type": "azure",
    #     "api_base": "https://your_base",
    #     "api_version": "2023-05-15",
    #     "api_key": "your_key",
    #     "max_tokens": 8192
    # }
    # llm = AzureLLM(**azure_cfg)

    # Zhipu backend configuration; "your_zhipu_key" is a placeholder —
    # replace with a real API key before running.
    cfg = {
        "engine": "chatglm_pro",
        "api_key": "your_zhipu_key"
    }
    # `llm` is read as a module-level global by main().
    llm = ZhipuLLM(**cfg)
    main()
