import json
import os
import re

from model_api import qwen_api_no_search, zhipu_api_no_search
from tqdm import tqdm

# Directory containing this script; all input/output paths below are
# resolved relative to it.
path_dir = os.path.dirname(os.path.abspath(__file__))

# System prompt (Chinese). It instructs the model to act as a finance
# question-setting expert: given a term ("prop"), a question about it and
# the answer, produce 5 augmented questions that must still be answerable
# by the original answer, emitted as a JSON list of strings inside a
# ```json fence (parsed downstream by call_llm_main).
system_prompt = """
你是一位金融出题专家，专注于基于给定的数据进行出题。
我会给你一些信息，包含了:
- 一个专有名词（prop）
- 围绕这个专有名词的一个问题（question）
- 对应这个问题的答案（answer）

你的任务是增广这个问题，生成5个扩展问题。
你要思考，确保验证扩展问依然能够使用原始答案进行回答。

你可以先进行一些思考，然后输出。输出格式固定为List[str]输出，你无需重复输出answer。
示例如下：

```json
[
  "我听说净值增长率，这是啥意思",
  "什么是净值增长率？",
  "...",
  "...",
  "..."
]
```
"""

# User prompt template (Chinese): "please augment the question based on
# the following JSON". {json_str} is filled with the serialized record.
user_prompt = """
请你基于以下的json，帮我增广题目：
{json_str}
"""


def get_expand_question(prop, json_data, model='glm-4-plus'):
    """Ask an LLM to generate expanded variants of one question record.

    NOTE: mutates ``json_data`` in place — removes the "rule" and "type"
    keys and renames "meta" to "prop". ``call_llm_main`` relies on that
    renaming when it later pops "prop" from the same dict.

    Args:
        prop: the financial term the question is about; echoed back on
            the returned dict under the "prop" key.
        json_data: question record containing at least "meta", "rule",
            "type", "question" and "answer".
        model: model name; routed to the Qwen API when it contains
            "qwen" (case-insensitive), to the Zhipu API when it
            contains "glm".

    Returns:
        The raw API response dict with "prop" added.

    Raises:
        ValueError: if the model name matches neither backend.
    """
    # Strip fields the prompt does not need and rename "meta" -> "prop"
    # so the serialized JSON matches the field names the system prompt
    # describes.
    json_data.pop("rule")
    json_data.pop("type")
    json_data['prop'] = json_data.pop("meta")
    json_str = json.dumps(json_data, ensure_ascii=False, indent=4)

    convs = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt.format(json_str=json_str)}
    ]
    model_lower = model.lower()  # hoist: checked twice below
    if 'qwen' in model_lower:
        llm_ans = qwen_api_no_search.qwen_call(model=model, convs=convs)
    elif 'glm' in model_lower:
        llm_ans = zhipu_api_no_search.zhipu_call(model=model, convs=convs)
    else:
        # Original raised a bare ValueError; include the offending name
        # so the failure is diagnosable from the traceback alone.
        raise ValueError(f"Unsupported model {model!r}: expected a Qwen or GLM model")
    llm_ans['prop'] = prop
    return llm_ans


def call_llm_main(question_data):
    """Expand one question record and serialize the result as a JSONL line.

    Args:
        question_data: dict with "meta", "rule", "type", "question" and
            "answer". Mutated by ``get_expand_question`` ("meta" is
            renamed to "prop"; "rule"/"type" are removed).

    Returns:
        A JSON string (with trailing newline) carrying the parsed
        expanded questions plus the original record's fields.
    """
    prop = question_data['meta']
    rule = question_data['rule']
    q_type = question_data['type']  # renamed: don't shadow builtin `type`
    llm_ans = get_expand_question(prop, question_data)
    # Drop API bookkeeping fields; use defaults so a response that lacks
    # one of them does not kill the worker with a KeyError.
    llm_ans.pop('search', None)
    llm_ans.pop('thinking', None)
    llm_ans.pop('usage', None)
    temp_ans = llm_ans.pop('answer')
    # The model is instructed to wrap its list in a ```json ... ``` fence.
    # Guard the search result: the original called .group(1) directly and
    # crashed with AttributeError whenever the fence was missing.
    fence = re.search(r'```json(.+?)```', temp_ans, re.S | re.I)
    if fence is None:
        print('JSON ERROR:', temp_ans)
        llm_ans['new_questions'] = []
    else:
        try:
            llm_ans['new_questions'] = json.loads(fence.group(1))
        except json.decoder.JSONDecodeError:
            print('JSON ERROR:', temp_ans)
            llm_ans['new_questions'] = []
    llm_ans['question'] = question_data['question']
    llm_ans['answer'] = question_data['answer']
    llm_ans['rule'] = rule
    llm_ans['type'] = q_type
    # "prop" was put there by get_expand_question's rename of "meta".
    llm_ans['meta'] = question_data.pop("prop")

    return json.dumps(llm_ans, ensure_ascii=False) + '\n'


if __name__ == '__main__':
    phase4_model = 'qwen3-max'
    model = 'glm-4-plus'
    # Read phase-4 questions: one JSON object per line (JSONL).
    input_path = os.path.join(path_dir, "phase4_output", f"{phase4_model}_questions.jsonl")
    with open(input_path, 'r', encoding='utf-8') as fp:
        questions_data = [json.loads(line) for line in fp]

    # Each record is an independent API call, so fan out across
    # processes; executor.map preserves input order in its results.
    from concurrent.futures import ProcessPoolExecutor
    with ProcessPoolExecutor(max_workers=20) as executor:
        llm_ans_list = executor.map(call_llm_main, questions_data)

    output_path = os.path.join(path_dir, "phase5_output", f"{model}_expand_questions.jsonl")
    # Context manager so the file is closed even if a worker raised
    # (the original left the handle open on any exception here).
    with open(output_path, 'w', encoding='utf-8') as output_fp:
        output_fp.writelines(llm_ans_list)