import os
import json
import re
import random
import copy
from typing import List, Tuple
import warnings

import torch
from transformers import LlamaForCausalLM, LlamaTokenizer
from tqdm import tqdm

class LlamaEvaluator:
    """Evaluate a LLaMA-style causal LM on Chinese multiple-choice exams.

    Each subject directory holds ``.json``/``.jsonl`` files with one question
    per line: ``{"inputs_pretokenized": <question text>,
    "targets_pretokenized": <answer, whose first char is A/B/C/D>}``.
    The evaluator prompts the model (optionally with few-shot examples drawn
    from the same file), extracts the chosen option letter from the generated
    text with a battery of regexes, and reports per-subject accuracy.
    """

    def __init__(self, model: "LlamaForCausalLM", tokenizer: "LlamaTokenizer",
                 fewshot_num: int = 0, choices=('A', 'B', 'C', 'D')):
        """
        Args:
            model: an already-loaded causal LM; put into eval mode here.
            tokenizer: the tokenizer matching ``model``.
            fewshot_num: number of in-context examples per question (0 = zero-shot).
            choices: valid option letters (stored but otherwise unused in this
                class; a tuple default avoids the mutable-default pitfall).
        """
        self.choices = choices
        self.tokenizer = tokenizer
        self.model = model
        self.model.eval()  # inference only: disable dropout etc.
        self.fewshot_num = fewshot_num

        # Model-facing prompt templates -- the Chinese text is part of the
        # evaluation protocol and is kept verbatim.
        self.prompt_template_dict = {
            "prompt_with_fewshot":
            (
            "示例:\n"
            "{examples}\n\n"
            "以下是中国关于{subject_name}考试的单项选择题，请参考示例选出其中的正确答案。\n"
            "题目:\n"
            "{question}\n\n"
            "答案:\n"
            ),
            "prompt_with_zeroshot":
            (
            "以下是中国关于{subject_name}考试的单项选择题，请选出其中的正确答案。\n"
            "题目:\n"
            "{question}\n\n"
            "答案:\n"
            )
            }

        # Answer-extraction regexes, tried in order; the first match wins.
        # All patterns are raw strings: the originals were plain strings, so
        # "\." / "\s" / "\S" were invalid escape sequences that trigger
        # DeprecationWarning (SyntaxWarning on Python 3.12+).
        self.patterns = [
            r"([ABCD])\.?.*",
            r"答案是?\s?([ABCD])",
            r"答案是?\s?：([ABCD])",
            r"答案是?\s?:([ABCD])",
            r"答案应该?是\s?([ABCD])",
            r"答案应该?选\s?([ABCD])",
            r"答案为\s?([ABCD])",
            r"选择\s?([ABCD])",
            r"只有选?项?\s?([ABCD])\s?是?对",
            r"只有选?项?\s?([ABCD])\s?是?错",
            r"只有选?项?\s?([ABCD])\s?不?正确",
            r"只有选?项?\s?([ABCD])\s?错误",
            r"说法不?对选?项?的?是\s?([ABCD])",
            r"说法不?正确选?项?的?是\s?([ABCD])",
            r"说法错误选?项?的?是\s?([ABCD])",
            r"([ABCD])\s?是正确的",
            r"([ABCD])\s?是正确答案",
            r"选项\s?([ABCD])\s?正确",
            r"所以答\s?([ABCD])",
            # NOTE(review): the leading "1." below looks like it may have been
            # intended as the anchor "^" -- confirm against upstream source.
            r"1.\s?([ABCD])[.。$]?$",
            r"所以\s?([ABCD][.。$]?$)",
            r"所有\s?([ABCD][.。$]?$)",
            r"[\s，：:,]([ABCD])[。，,\.]?$",
            r"[\s，,：:][故即]([ABCD])[。\.]?$",
            r"[\s，,：:]因此([ABCD])[。\.]?$",
            r"[是为。]\s?([ABCD])[。\.]?$",
            r"因此\s?([ABCD])[。\.]?$",
            r"显然\s?([ABCD])[。\.]?$",
            r"1.\s?(.*?)$",
            r"答案是\s?(\S+)(?:。|$)",
            r"答案应该是\s?(\S+)(?:。|$)",
            r"答案为\s?(\S+)(?:。|$)",
        ]

    @staticmethod
    def build_examples_inputs(examples: List[Tuple[str, str]]) -> str:
        """Render few-shot (question, answer) pairs as prompt text.

        Args:
            examples: sequence of pairs; ``example[0]`` is the question text
                and ``example[1]`` the answer text.

        Returns:
            The concatenated "题目：.../答案：..." lines, one pair per example.
        """
        return "".join(
            (
                "题目：{}\n"
                "答案：{}\n"
            ).format(example[0], example[1])
            for example in examples
        )

    def build_prompt(self, subject_name: str, question: str, examples=None) -> str:
        """Assemble the full prompt for one question.

        Args:
            subject_name: subject displayed in the prompt (directory basename).
            question: the question text, including its options.
            examples: optional few-shot (question, answer) pairs; when given,
                the few-shot template is used, otherwise the zero-shot one.

        Returns:
            The formatted prompt string.
        """
        if examples:
            prompt_template = self.prompt_template_dict["prompt_with_fewshot"]
            examples = self.build_examples_inputs(examples)
            return prompt_template.format_map({ "examples" : examples, "subject_name" : subject_name, "question" : question })
        else:
            prompt_template = self.prompt_template_dict["prompt_with_zeroshot"]
            return prompt_template.format_map({ "subject_name" : subject_name, "question" : question })

    @staticmethod
    def extract_option_in_answer(answer: str, patterns: List[str]):
        """Return the first capture group of the first pattern matching
        ``answer``, or None when no pattern matches."""
        for pattern in patterns:
            match = re.search(pattern, answer)
            if match:
                return match.group(1)
        return None

    def _generate_answer(self, prompt: str) -> str:
        """Generate a completion for ``prompt`` and return only the new text.

        ``generate()`` on a decoder-only model returns prompt + continuation,
        so the prompt is stripped by slicing at the input length.  (The old
        mask-and-pad approach broke when ``pad_token_id`` is None, which is
        common for LLaMA tokenizers.)
        """
        model_inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
        with torch.no_grad():
            output_ids = self.model.generate(**model_inputs, max_new_tokens=100)
        prompt_len = model_inputs["input_ids"].shape[1]
        return self.tokenizer.decode(output_ids[0, prompt_len:], skip_special_tokens=True)

    def eval_subject(self, subject_path, save_result_dir=None):
        """Evaluate every exam file under one subject directory.

        Args:
            subject_path: directory containing ``.json``/``.jsonl`` exam files.
            save_result_dir: if given, one prediction-record file is written
                per input exam file (same basename).

        Returns:
            Accuracy over all questions, or 0.0 when no question was found
            (the old code raised ZeroDivisionError in that case).
        """
        correct_num = 0
        total_num = 0
        subject_name = os.path.basename(subject_path)

        for exam_paper_file in tqdm(os.listdir(subject_path), postfix=f"{subject_name}"):

            full_filename = os.path.join(subject_path, exam_paper_file)
            extension = os.path.splitext(exam_paper_file)[1]
            if not (os.path.isfile(full_filename) and extension in ('.json', '.jsonl')):
                warnings.warn(f"There isn't a test file under {subject_path}.\nThe filetree maybe wrong.", UserWarning)
                continue

            with open(full_filename, 'r', encoding='utf-8') as fp:
                subject_tests = [json.loads(jsonl) for jsonl in fp]

            # Results for THIS file only.  (Bug fix: the old code appended to
            # one cumulative list and re-wrote ALL accumulated records into
            # every per-file result file.)
            file_results = []

            for i, a_subject_test in enumerate(subject_tests):

                # Shallow copy is sufficient: pop() mutates only the outer
                # list, never the question dicts themselves.
                candidate_tests = list(subject_tests)
                candidate_tests.pop(i)

                question = a_subject_test["inputs_pretokenized"]
                target_option = a_subject_test["targets_pretokenized"][0]

                if self.fewshot_num > 0:
                    # Explicit raise instead of assert (asserts vanish under -O).
                    if self.fewshot_num >= len(candidate_tests):
                        raise ValueError("There must be at least one question used to test.")
                    # NOTE(review): random.choices samples WITH replacement, so
                    # few-shot examples may repeat -- confirm this is intended.
                    examples = [(example["inputs_pretokenized"], example["targets_pretokenized"][0])
                                for example in random.choices(candidate_tests, k=self.fewshot_num)]
                else:
                    examples = None

                eval_prompt = self.build_prompt(subject_name, question, examples)
                answer = self._generate_answer(eval_prompt)

                predict_option = self.extract_option_in_answer(answer, self.patterns)
                print("Predict:", predict_option, "Answer: ", target_option)

                if predict_option == target_option:
                    correct_num += 1

                a_test_result = dict(a_subject_test)
                a_test_result['examples'] = examples
                a_test_result['predict_option'] = predict_option
                file_results.append(a_test_result)

            total_num += len(subject_tests)

            if save_result_dir:
                os.makedirs(save_result_dir, exist_ok=True)
                result_filename = os.path.join(save_result_dir, exam_paper_file)
                with open(result_filename, 'w', encoding='utf-8') as fp:
                    fp.writelines(json.dumps(t, ensure_ascii=False) + '\n' for t in file_results)

        return correct_num / total_num if total_num else 0.0

    def evaluate_subjects(self, subjects_dir: str, save_result_dir: str = None):
        """Evaluate every subject sub-directory and write a metrics summary.

        Args:
            subjects_dir (str): the data path of test subjects like:
                ``subjects_dir/subject/test_file1.jsonl``
            save_result_dir (str, optional): where to save per-question results
                and the ``metrics.json`` summary. Defaults to None.

        Returns:
            dict mapping subject name to accuracy.
        """
        test_name = os.path.basename(subjects_dir)
        results = dict()

        for subject_dir in os.listdir(subjects_dir):
            full_subject_dir = os.path.join(subjects_dir, subject_dir)
            if not os.path.isdir(full_subject_dir):
                warnings.warn(f"There is a common file under {subjects_dir}.\nThe filetree may be wrong.", UserWarning)
                continue
            subject_name = os.path.basename(full_subject_dir)
            subject_output_dir = os.path.join(save_result_dir, test_name, subject_name) if save_result_dir else None
            # eval_subject now always returns a float (0.0 when empty).
            results[subject_name] = self.eval_subject(full_subject_dir, subject_output_dir)

        if save_result_dir:
            # Bug fix: this directory may not exist yet (it was only created
            # inside eval_subject, and only when a subject actually saved).
            metrics_dir = os.path.join(save_result_dir, test_name)
            os.makedirs(metrics_dir, exist_ok=True)
            with open(os.path.join(metrics_dir, "metrics.json"), 'w', encoding='utf-8') as fp:
                # json.dump instead of writelines(str): writelines iterated the
                # string char-by-char; ensure_ascii=False matches the
                # per-question result files.
                json.dump(results, fp, ensure_ascii=False)

        return results