import string
from typing import List, Tuple

# Default prompt templates, keyed by two independent switches:
#   * "..._with_inputs" / "..._without_inputs": whether an auxiliary inputs
#     block is prepended before the question.
#   * "answer_prompt_..." / "instruction_prompt_...": whether the gold answer
#     is appended (used when rendering few-shot examples) or the prompt ends
#     at the answer label (used for the question the model must answer).
# Placeholders filled via str.format_map: {inputs}, {instruction}, {answer}.
# The Chinese labels mean: 输入 = "input", 题目 = "question", 答案 = "answer".
DEFAULT_PROMPT_TEMPLATE_DICT={
    'answer_prompt_with_inputs':
    (
        "输入: \n{inputs}\n"
        "题目: \n{instruction}\n"
        "答案: \n{answer}\n"
    ),
    'answer_prompt_without_inputs':
    (
        "题目: \n{instruction}\n"
        "答案: \n{answer}\n"
    ),
    'instruction_prompt_with_inputs':
    (
        "输入: \n{inputs}\n"
        "题目: \n{instruction}\n"
        "答案: \n"
    ),
    'instruction_prompt_without_inputs':
    (
        "题目: \n{instruction}\n"
        "答案: \n"
    ),
}


class Evaluator:
    """Base class for model evaluators on multiple-choice style exams.

    Subclasses must override :meth:`eval_subject`. This base class only
    provides answer normalization and prompt construction from a template
    dictionary (see ``DEFAULT_PROMPT_TEMPLATE_DICT`` for the expected keys).
    """

    def __init__(self, model_name, choices, fewshot_num=0, prompt_templat_dict=None):
        """
        Args:
            model_name (str): identifier of the model under evaluation.
            choices (list): candidate option labels, e.g. ['A', 'B', 'C', 'D'].
            fewshot_num (int): number of in-context examples (0 = zero-shot).
            prompt_templat_dict (dict, optional): template-name -> format-string
                mapping. [sic — parameter name kept for backward compatibility.]
                Defaults to ``DEFAULT_PROMPT_TEMPLATE_DICT``.
        """
        self.model_name = model_name
        self.choices = choices
        self.fewshot_num = fewshot_num
        # Resolve the default here instead of in the signature so the shared
        # module-level dict is not captured at def time (a mutable default
        # argument); callers that passed the kwarg explicitly are unaffected.
        if prompt_templat_dict is None:
            prompt_templat_dict = DEFAULT_PROMPT_TEMPLATE_DICT
        self.prompt_template_dict = prompt_templat_dict

    @staticmethod
    def format_answer(answer):
        """Normalize an answer string for comparison.

        Lower-cases the text, strips ASCII punctuation (``string.punctuation``),
        and collapses all runs of whitespace to single spaces.

        Args:
            answer (str): raw answer text.

        Returns:
            str: normalized answer.
        """

        def white_space_fix(text):
            return ' '.join(text.split())

        def remove_punc(text):
            exclude = set(string.punctuation)
            return ''.join(ch for ch in text if ch not in exclude)

        def lower(text):
            return text.lower()

        return white_space_fix(remove_punc(lower(answer)))

    def build_prompt(self, instruction, inputs=None, answer=None):
        """Build an evaluation prompt from the configured templates.

        Args:
            instruction (str): the question text.
            inputs (str, optional): auxiliary input block rendered before the
                question. Defaults to None (template without inputs is used).
            answer (str, optional): gold answer. When given, an
                "answer_prompt_*" template is used (e.g. to render a few-shot
                example); otherwise the prompt ends at the answer label.

        Returns:
            str: the formatted prompt.
        """
        if inputs:
            key = 'answer_prompt_with_inputs' if answer else 'instruction_prompt_with_inputs'
        else:
            key = 'answer_prompt_without_inputs' if answer else 'instruction_prompt_without_inputs'

        # Pass only the fields the selected template declares. str.format_map
        # silently ignores extras (which the original code relied on), but an
        # explicit mapping documents the contract of each template.
        fields = {'instruction': instruction}
        if inputs:
            fields['inputs'] = inputs
        if answer:
            fields['answer'] = answer
        return self.prompt_template_dict[key].format_map(fields)

    def eval_subject(self, subject_name, test_df, dev_df=None, few_shot=False, save_result_dir=None):
        """Evaluate one subject. Must be overridden by subclasses."""
        raise NotImplementedError("This method {} should be overridden by subclass.".format('eval_subject'))



###############################################################################################################################################################
import os
import json
import re
import random
import copy
# Fixed: `from typing import String` raised ImportError — typing has no
# `String`. List/Tuple are what this section actually uses (annotations).
from typing import List, Tuple

import torch
from transformers import LlamaForCausalLM, LlamaTokenizer
from tqdm import tqdm

class ChineseLlamaEvaluator:
    """Evaluate a LLaMA-family causal LM on Chinese multiple-choice exams.

    Questions are read from JSON-lines exam files under a subject directory;
    the model's free-form completion is mapped back to one of the option
    letters (A/B/C/D) with a battery of regex patterns.
    """

    def __init__(self, model_id_or_path, enbale_8bit, choices=['A', 'B', 'C', 'D'], fewshot_num=0):
        """
        Args:
            model_id_or_path (str): model id or local path; only its basename
                is kept as the model name.
            enbale_8bit (bool): [sic — name kept for caller compatibility]
                currently unused because the model/tokenizer are taken from
                module-level globals instead of being loaded here.
            choices (list): candidate option letters.
            fewshot_num (int): number of in-context examples per question
                (0 = zero-shot).
        """
        self.model_name = os.path.basename(model_id_or_path)
        self.choices = choices
        # The tokenizer/model are expected to be created once at module level
        # and shared; loading via from_pretrained here is intentionally
        # disabled (presumably to avoid reloading per evaluator — confirm).
        global tokenizer
        global model
        self.tokenizer = tokenizer
        self.model = model
        self.model.eval()
        self.fewshot_num = fewshot_num

        # Prompt templates: few-shot prepends rendered examples; zero-shot
        # asks the question directly. Placeholders filled via format_map.
        self.prompt_template_dict = {
            "prompt_with_fewshot":
            (
                "示例:\n"
                "{examples}\n\n"
                "以下是中国关于{subject_name}考试的单项选择题，请参考示例回答给出正确答案。\n"
                "{question}\n\n"
                "答案:\n"
            ),
            "prompt_with_zeroshot":
            (
                "以下是中国关于{subject_name}考试的单项选择题，请选出其中的正确答案。\n"
                "{question}\n\n"
                "答案:\n"
            ),
        }

        # Patterns are tried in order; the first match wins, so the more
        # specific "the answer is X" forms come before the loose fallbacks.
        # Raw strings so regex escapes (\s, \.) are passed through verbatim.
        self.patterns = [
            r"([ABCD])\.?.*",
            r"答案是?\s?([ABCD])",
            r"答案是?\s?：([ABCD])",
            r"答案是?\s?:([ABCD])",
            r"答案应该?是\s?([ABCD])",
            r"答案应该?选\s?([ABCD])",
            r"答案为\s?([ABCD])",
            r"选择\s?([ABCD])",
            r"只有选?项?\s?([ABCD])\s?是?对",
            r"只有选?项?\s?([ABCD])\s?是?错",
            r"只有选?项?\s?([ABCD])\s?不?正确",
            r"只有选?项?\s?([ABCD])\s?错误",
            r"说法不?对选?项?的?是\s?([ABCD])",
            r"说法不?正确选?项?的?是\s?([ABCD])",
            r"说法错误选?项?的?是\s?([ABCD])",
            r"([ABCD])\s?是正确的",
            r"([ABCD])\s?是正确答案",
            r"选项\s?([ABCD])\s?正确",
            r"所以答\s?([ABCD])",
            r"1.\s?([ABCD])[.。$]?$",
            r"所以\s?([ABCD][.。$]?$)",
            r"所有\s?([ABCD][.。$]?$)",
            r"[\s，：:,]([ABCD])[。，,\.]?$",
            r"[\s，,：:][故即]([ABCD])[。\.]?$",
            r"[\s，,：:]因此([ABCD])[。\.]?$",
            r"[是为。]\s?([ABCD])[。\.]?$",
            r"因此\s?([ABCD])[。\.]?$",
            r"显然\s?([ABCD])[。\.]?$",
            r"1.\s?(.*?)$",
            r"答案是\s?(\S+)(?:。|$)",
            r"答案应该是\s?(\S+)(?:。|$)",
            r"答案为\s?(\S+)(?:。|$)",
        ]

    @staticmethod
    def build_examples_inputs(examples: List[Tuple[str]]):
        """Render (question, answer) pairs as a few-shot examples block.

        Args:
            examples: list of (question, answer) tuples.

        Returns:
            str: concatenated "题目：.../答案：..." lines, one pair per example.
        """
        inputs_text = ""
        for example in examples:
            example_text = (
                "题目：{}\n"
                "答案：{}\n"
            ).format(example[0], example[1])
            inputs_text += example_text
        return inputs_text

    def build_prompt(self, subject_name: str, question: str, examples=None) -> str:
        """Build the evaluation prompt for one question.

        Args:
            subject_name: exam subject inserted into the template.
            question: the question text (with options).
            examples: optional list of (question, answer) tuples for few-shot.

        Returns:
            str: the full prompt.
        """
        if examples:
            prompt_template = self.prompt_template_dict["prompt_with_fewshot"]
            # Fixed: build_examples_inputs takes only the examples list; the
            # original also passed subject_name, raising TypeError on every
            # few-shot prompt.
            examples_text = self.build_examples_inputs(examples)
            return prompt_template.format_map({"examples": examples_text, "subject_name": subject_name, "question": question})
        else:
            prompt_template = self.prompt_template_dict["prompt_with_zeroshot"]
            return prompt_template.format_map({"subject_name": subject_name, "question": question})

    @staticmethod
    def extract_option_in_answer(answer: str, patterns: List[str]):
        """Return the first capture group of the first pattern matching answer.

        Args:
            answer: the model's decoded answer text.
            patterns: ordered regex pattern strings, each with one group.

        Returns:
            str | None: the extracted option (or free text for the fallback
            patterns), or None when nothing matches.
        """
        regexes = [re.compile(pattern) for pattern in patterns]
        for regex in regexes:
            match = regex.search(answer)
            if match:
                return match.group(1)
        return None

    def eval_subject(self, subject_path, save_result_dir=None):
        """Evaluate every JSON-lines exam file directly under subject_path.

        Each record must carry "inputs_pretokenized" (question text) and
        "targets_pretokenized" (whose first element's first character is the
        gold option letter).

        Args:
            subject_path (str): directory of .json/.jsonl exam files; its
                basename is used as the subject name in prompts.
            save_result_dir (str, optional): when set, per-file prediction
                dumps are written there under the same file names.

        Returns:
            float: overall accuracy (0.0 when no parsable files were found).
        """
        correct_num = 0
        total_num = 0
        subject_name = os.path.basename(subject_path)

        for exam_paper_file in tqdm(os.listdir(subject_path)):

            full_filename = os.path.join(subject_path, exam_paper_file)
            if not (os.path.isfile(full_filename) and (exam_paper_file.split('.')[-1] in ['json', 'jsonl'])):
                continue

            # Fixed: results are collected per exam file. The original kept
            # one accumulating list, so every saved result file also
            # contained all previous files' rows.
            test_results = []

            with open(full_filename, 'r', encoding='utf-8') as fp:
                subject_tests = [json.loads(jsonl) for jsonl in fp]

            for i in range(len(subject_tests)):

                # Shallow copy, then remove the sample under test so it
                # cannot be drawn as its own few-shot example. (The original
                # comment claimed "deep copy"; copy.copy is shallow, which is
                # sufficient since the list itself is what gets mutated.)
                candidate_tests = copy.copy(subject_tests)
                a_subject_test = candidate_tests.pop(i)

                question = a_subject_test["inputs_pretokenized"]
                target_option = a_subject_test["targets_pretokenized"][0]

                if self.fewshot_num > 0:
                    assert self.fewshot_num < len(candidate_tests), "There must be at least one question used to test."
                    # NOTE(review): random.choices samples WITH replacement,
                    # so the same example can appear more than once — confirm
                    # this is intended (random.sample would avoid duplicates).
                    examples = [(example["inputs_pretokenized"], example["targets_pretokenized"][0])
                                for example in random.choices(candidate_tests, k=self.fewshot_num)]
                else:
                    examples = None

                eval_prompt = self.build_prompt(subject_name, question, examples)
                model_inputs = self.tokenizer(eval_prompt, return_tensors="pt").to("cuda")
                with torch.no_grad():
                    raw_output_ids = self.model.generate(**model_inputs, max_new_tokens=100)

                # Mask out the echoed prompt tokens so only the generated
                # continuation (the answer) survives decoding.
                inputs_mask = model_inputs['attention_mask'] > 0
                output_mask = torch.zeros(raw_output_ids.size()).to(inputs_mask)
                output_mask[:, :inputs_mask.size(1)] = inputs_mask
                # assumes pad_token_id is set on this tokenizer — TODO confirm
                raw_output_ids[output_mask] = self.tokenizer.pad_token_id
                answer = self.tokenizer.decode(raw_output_ids[0], skip_special_tokens=True)

                predict_option = self.extract_option_in_answer(answer, self.patterns)
                print("Predict:", predict_option, "Answer: ", target_option)

                if predict_option == target_option:
                    correct_num += 1

                a_test_result = copy.copy(a_subject_test)
                a_test_result['examples'] = examples
                a_test_result['predict_option'] = predict_option
                test_results.append(a_test_result)

            total_num += len(subject_tests)

            if save_result_dir:
                os.makedirs(save_result_dir, exist_ok=True)
                result_filename = os.path.join(save_result_dir, exam_paper_file)
                with open(result_filename, 'w', encoding='utf-8') as fp:
                    fp.writelines(json.dumps(t, ensure_ascii=False) + '\n' for t in test_results)

        # Fixed: avoid ZeroDivisionError when the directory holds no
        # parsable exam files.
        return correct_num / total_num if total_num else 0.0