from argparse import ArgumentParser, Namespace
from multiprocessing import Pool, current_process, set_start_method
from os import makedirs, path
from time import sleep
from typing import Any, Dict, List, Optional, Tuple

from loguru import logger
from openai import OpenAI

from model import Exam, Question, ExamResult, ExamResultCollection, Answer, ExamCollection


# System prompt sent on every request.  (Chinese: "You are an answering
# assistant; give the correct answer to the user's question.")
SYSTEM_PROMPT = """
你是一个答题助手，请根据用户提问的题目给出正确答案。
"""

# Per-question prompt template; `question` is the question body and `type`
# is its category plus single/multiple-choice marker.  (Chinese: "The
# question is: ... Its type is: ... Choose the correct answer.")
PROMPT_TEMPLATE = """
题目描述如下：{question}。
本题目题型为：{type}。

请根据以上题目描述选出正确答案。
"""

# Module-wide OpenAI-compatible API client; initialized in main() before any
# worker uses it (workers inherit it via the 'fork' start method).
client: OpenAI


def parse_args() -> Namespace:
    """Parse command-line options for an evaluation run.

    Returns an argparse Namespace with attributes: api_base, api_key,
    model_name, rerun_failed, output, and nworkers.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument("--api-base", type=str)
    arg_parser.add_argument("--api-key", type=str, default='sk-1234')
    arg_parser.add_argument("--model-name", type=str)
    arg_parser.add_argument("--rerun-failed", action='store_true')
    arg_parser.add_argument("--output", type=str)
    arg_parser.add_argument("-j", "--nworkers", type=int, default=1)
    namespace = arg_parser.parse_args()
    return namespace


def __rmthink(content: str) -> str:
    """Strip a single ``<think>...</think>`` reasoning section from *content*.

    Returns the text before ``<think>`` joined with the text after
    ``</think>``.  If the closing tag is missing, the text following the
    unclosed ``<think>`` tag is returned as-is (best-effort, matching the
    previous behavior).  Callers check that ``<think>`` is present before
    calling; with no ``<think>`` tag the initial split raises ValueError.
    """
    # Fix: return annotation was Tuple[str, str] although both branches
    # return a single string.
    pre, rest = content.split('<think>', 1)
    if '</think>' not in rest:
        # Unbalanced tags: keep whatever followed <think> rather than crash.
        logger.debug("think label mismatch")
        return rest
    _, post = rest.rsplit('</think>', 1)
    return pre + post


def ask(exam_id: int, question: Question, model_name: str) -> Answer:
    """Ask the model one exam question via a two-turn chat.

    Turn 1 presents the question (with temperature 0.6); turn 2 asks the
    model to summarize the correct options on a single line.  Option
    letters A-D found in the final reply become ``answer.options``, which
    is compared against ``question.answer`` for correctness.

    The returned Answer carries the accumulated token cost and the full
    transcript; assistant entries keep a ``raw_content`` field holding
    the reply before ``<think>`` stripping, for debugging.

    Raises:
        Exception: if either completion finishes with a reason other
            than 'stop'.
    """
    # Question category, e.g. "<type>，单选题" (single choice) when exactly
    # one answer letter is expected, otherwise "多选题" (multiple choice).
    question_kind = question.type + "，"
    question_kind += '单选题' if len(question.answer) == 1 else '多选题'
    answer = Answer(exam_id=exam_id, question_id=question.id)
    messages: List[Dict[str, str]] = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": PROMPT_TEMPLATE.format(question=question.content, type=question_kind)}
    ]

    def _complete(**extra: Any) -> str:
        """Run one completion over `messages`, append the (think-stripped)
        assistant reply, accumulate token cost, and return the reply text."""
        # Fix: drop the bookkeeping-only "raw_content" key before calling the
        # API — it is not a valid chat message field and strict servers
        # reject unknown fields.  The stored transcript keeps it.
        api_messages = [{k: v for k, v in m.items() if k != "raw_content"}
                        for m in messages]
        resp = client.chat.completions.create(
            model=model_name, messages=api_messages,
            max_completion_tokens=8192,
            **extra
            )
        if resp.choices[0].finish_reason != 'stop':
            raise Exception(f"unexpected stop reason {resp.choices[0].finish_reason}")
        answer.token_cost += resp.usage.total_tokens
        raw_content = resp.choices[0].message.content
        content = __rmthink(raw_content) if '<think>' in raw_content else raw_content
        messages.append({"role": "assistant", "content": content, "raw_content": raw_content})
        return content

    _complete(temperature=0.6)
    # Chinese: "Briefly summarize the correct answer; output exactly one line
    # listing the correct options and nothing else."
    messages.append({"role": "user", "content": "简单总结正确答案，你应当仅输出一行来列出正确的选项，不要输出其他内容。"})
    answer_body = _complete()

    answer.options = set(filter(lambda a: a in ('A', 'B', 'C', 'D'), answer_body))
    answer.correct = answer.options == set(question.answer)
    answer.messages = messages
    return answer


def ask_wrap(exam_id: int, question: Question, model_name: str) -> Optional[Answer]:
    """Call ask() with retry handling; return None on unrecoverable failure.

    Rate-limit/overload errors retry up to 10 times with a 10s back-off;
    transient connection errors retry after 2s (also counted against the
    10 attempts); gateway timeouts and any other error abort immediately.
    """
    # Substrings that various OpenAI-compatible backends use to signal
    # transient rate limiting / overload.
    rate_limit_markers = (
        'rate_limit_reached_error',
        'rate limiting',
        'rate_limited',
        'service overloaded',
        'rate_limit_error',
    )
    pid = current_process().pid
    for _ in range(10):
        try:
            answer = ask(exam_id, question, model_name)
        except Exception as ex:
            msg = str(ex)
            if any(marker in msg for marker in rate_limit_markers):
                logger.warning("rate limitation exceed, sleep and retry")
                sleep(10)
                continue
            if '504 Gateway Time-out' in msg:
                logger.warning("connection error (gateway timeout)")
                return None
            if 'Connection error' in msg:
                logger.warning("connection error, sleep and retry")
                sleep(2)
                continue
            logger.warning(f"process {pid}, {exam_id}-{question.id:2d} ({question.type:20s}): failed because {ex}")
            return None
        correct_label = '✅' if answer.correct else '❌'
        hint = f", answer is {question.answer}" if not answer.correct else ''
        logger.info(f"process {pid}, {exam_id}-{question.id:2d} " +
                    f"({question.type:20s}): {correct_label} {answer.options}{hint}")
        return answer
    # Fix: previously the function fell off the loop and returned None with
    # no indication that all retries were exhausted — log the give-up.
    logger.warning(f"process {pid}, {exam_id}-{question.id:2d}: gave up after exhausting retries")
    return None


def evaluate(exams: ExamCollection, model_name: str, nworkers: int,
             collection: ExamResultCollection, rerun_failed: bool) -> None:
    """Run every pending question through the model using a worker pool.

    Answers are written into *collection* in place.  When *rerun_failed*
    is set, existing results are kept and only questions with no recorded
    answer are re-run; otherwise fresh ExamResult entries replace any
    existing ones.  Must be called once per process: it sets the
    multiprocessing start method to 'fork' (workers inherit the module
    globals, including the API client).
    """
    tasks: List[Tuple[int, Question, str]] = []
    for exam in exams.exams:
        if rerun_failed:
            exam_result = collection.results[exam.id]
        else:
            exam_result = ExamResult(exam_id=exam.id)
            collection.results[exam.id] = exam_result

        for question in exam.questions:
            # In rerun-failed mode, only questions that previously produced
            # no answer are run again.
            if not rerun_failed or question.id not in exam_result.answers:
                tasks.append((exam.id, question, model_name))

    if not tasks:
        # Fix: avoid ZeroDivisionError in the summary line when nothing is
        # pending (e.g. --rerun-failed on a fully completed result file).
        logger.info("no pending questions, nothing to do")
        return

    set_start_method('fork')
    ncorrect = 0
    with Pool(nworkers) as p:
        for answer in p.starmap(ask_wrap, tasks):
            if answer is None:
                # Unrecoverable failure for this question; leave it absent so
                # a later --rerun-failed pass picks it up.
                continue
            if answer.correct:
                ncorrect += 1

            collection.results[answer.exam_id].answers[answer.question_id] = answer

    logger.info(f"all done, correctness {ncorrect} in {len(tasks)} total ({ncorrect / len(tasks):.2f});)")

def main() -> None:
    """Entry point: load exams from data.json, evaluate, persist JSON results.

    The result path defaults to ``<script dir>/results/<model>.json`` unless
    --output is given.  With --rerun-failed, the existing result file is
    loaded and only unanswered questions are re-run.
    """
    global client
    args = parse_args()
    client = OpenAI(api_key=args.api_key, base_url=args.api_base)
    model_name = args.model_name
    with open('data.json', 'r') as fdata:
        exams = ExamCollection.model_validate_json(fdata.read())

    if args.output:
        result_path = args.output
    else:
        folder = path.dirname(path.realpath(__file__))
        result_path = path.join(folder, "results", f"{model_name}.json")

    if args.rerun_failed:
        if not path.isfile(result_path):
            logger.critical("--rerun-failed specified but existing result not found")
            raise SystemExit(1)

        with open(result_path, "r") as fresult:
            collection = ExamResultCollection.model_validate_json(fresult.read())
    else:
        collection = ExamResultCollection()

    evaluate(exams, model_name, args.nworkers, collection, args.rerun_failed)

    # Fix: the default "results" directory may not exist, which would make
    # this write fail only AFTER a long (and expensive) evaluation run.
    result_dir = path.dirname(result_path)
    if result_dir:
        makedirs(result_dir, exist_ok=True)
    with open(result_path, "w") as fresult:
        fresult.write(collection.model_dump_json(indent=2))


# Script entry point: only run when executed directly, not when imported
# (also required so multiprocessing workers importing this module are safe).
if __name__ == "__main__":
    main()
