import json
import os
import random
import re
from os.path import join, abspath, exists
from typing import List, Set, Dict, Tuple, Any, Optional

import numpy as np
import tqdm

from backend.experiment.framework.shots_prompt.task import \
    PerplexityPromptShotsChoiceMakingTaskMaker
from backend.experiment.framework.task import TaskMaker
from backend.experiment.framework.log import Log
from backend.experiment.framework.shots_prompt.data import ShotsGenerator
from backend.experiment.tokenizer import tokenizer
from backend.experiment.framework.shots_prompt.implement import \
    CommonPerplexityChoiceMakingImplement, \
    CommonPerplexityChoiceMakingSample
from backend.experiment.model import context_limit
from backend.experiment.framework.tokenizer import MultiprocessingTokenizer


class CHIDShotsGenerator(ShotsGenerator):
    """Supplies few-shot demonstrations for a CHID candidate idiom.

    The first shot is the idiom's dictionary definition; later shots are
    tokenized example sentences containing the idiom, drawn at random from
    those short enough to fit the remaining token budget.
    """

    def __init__(self, implement: 'CHIDImplement'):
        # This generator reads CHID-specific tables (idiom_meaning /
        # sorted_idiom_examples) off the implement, so validate early.
        assert isinstance(implement, CHIDImplement)
        self.__implement = implement

    def get_shot(
            self, limit_remain: int, selected_shots: Set[int],
            **kwargs
    ) -> Optional[Tuple[np.ndarray, int]]:
        """Pick the next shot for ``kwargs['candidate']``.

        :param limit_remain: remaining token budget for this shot.
        :param selected_shots: ids of shots already used — example indices
            plus ``-1`` for the definition shot.
        :return: ``(token array, shot id)`` or ``None`` when nothing fits.
        """
        shot_count = len(selected_shots)
        candidate = kwargs['candidate']
        if shot_count == 0:
            # The first example for an idiom is the idiom's definition.
            shot = self.__implement.idiom_meaning[candidate][1]
            assert len(shot) <= limit_remain, \
                f'{candidate}, {len(shot)}, {limit_remain}'
            return shot, -1
        # Examples are sorted by ascending token length; binary-search the
        # rightmost example that still fits the budget.  (The previous
        # halving loop only probed end // 2 and discarded longer examples
        # that also fit.)
        assert candidate in self.__implement.sorted_idiom_examples
        examples = self.__implement.sorted_idiom_examples[candidate]
        low, high = 0, len(examples) - 1
        fit = -1
        while low <= high:
            mid = (low + high) // 2
            if len(examples[mid]) <= limit_remain:
                fit = mid
                low = mid + 1
            else:
                high = mid - 1
        if fit < 0:
            # Even the shortest example exceeds the budget.
            return None
        alternatives = [i for i in range(fit + 1)
                        if i not in selected_shots]
        if not alternatives:
            return None
        chosen = random.choice(alternatives)
        # BUG FIX: return the index into `examples` (not the index into the
        # temporary `alternatives` list) so the caller can record it in
        # selected_shots and the same example is never served twice.
        return examples[chosen], chosen


class CHIDImplement(CommonPerplexityChoiceMakingImplement):
    """Implement of the CHID (Chinese IDiom cloze) benchmark.

    Test records contain passages with ``#idiomNNN#`` blanks and a list of
    candidate idioms; the task is to pick the candidate whose insertion
    yields the lowest perplexity.  The training split is mined into
    per-idiom example sentences (used as few-shot demonstrations by
    CHIDShotsGenerator) and idiom definitions, with token arrays cached on
    disk as JSON so later runs skip tokenization.
    """

    # Placeholder inserted at the blank currently being scored.
    __mask_token = '[MASK]'
    # Matches blank markers such as '#idiom000001#'.
    __blank_pattern = re.compile(r'#idiom[0-9]*#')
    # idiom -> raw example sentences containing that idiom
    __idiom_examples: Dict[str, List[str]] = {}
    # idiom -> (definition text, definition token array)
    __idiom_meaning: Dict[str, Tuple[str, np.ndarray]] = {}
    # idiom -> example token arrays sorted by ascending length
    __sorted_idiom_examples: Dict[str, List[np.ndarray]] = {}
    # example sentence -> its token array
    __idiom_examples_token: Dict[str, np.ndarray] = {}
    # blank marker (e.g. '#idiom000001#') -> index of the correct candidate
    __answer: Dict[str, int] = {}

    @property
    def idiom_examples(self) -> Dict[str, List[str]]:
        """Raw example sentences keyed by idiom."""
        return self.__idiom_examples

    @property
    def idiom_meaning(self) -> Dict[str, Tuple[str, np.ndarray]]:
        """(definition text, definition tokens) keyed by idiom."""
        return self.__idiom_meaning

    @property
    def sorted_idiom_examples(self) -> Dict[str, List[np.ndarray]]:
        """Example token arrays keyed by idiom, shortest first."""
        return self.__sorted_idiom_examples

    @property
    def idiom_examples_token(self) -> Dict[str, np.ndarray]:
        """Token arrays keyed by example sentence text."""
        return self.__idiom_examples_token

    @classmethod
    def _make_test_sample(cls, record, made_sample_num: int) -> \
            List[CommonPerplexityChoiceMakingSample]:
        """Build one choice-making sample per blank in the record.

        The scored blank becomes the mask token; every other blank in the
        same passage is neutralised with four '[UNK]' tokens so it cannot
        influence the perplexity of the scored one.
        """
        res = []
        candidates: List[str] = record['candidates']
        contents: List[str] = record['content']
        for content in contents:
            blanks = cls.__blank_pattern.findall(content)
            assert len(blanks) > 0
            for blank in blanks:
                prompt = content.replace(blank, cls.__mask_token)
                prompt = cls.__blank_pattern.sub('[UNK]' * 4, prompt)
                res.append(
                    CommonPerplexityChoiceMakingSample(
                        candidates=candidates,
                        prompt=prompt,
                        # '#idiom123#' -> 123
                        sample_id=int(
                            blank.replace('idiom', '').replace('#', '')),
                        target_mask_token=cls.__mask_token
                    )
                )
        return res

    @classmethod
    def __read_json_lines(cls, filename: str) -> list:
        """Read a JSON-lines file from the data directory."""
        with open(join(cls.data_dir(), filename), 'r', encoding='utf8') \
                as file:
            return [json.loads(line) for line in file]

    @classmethod
    def _read_train_records(cls) -> list:
        # Training records are consumed by _process_train_data instead.
        return []

    @classmethod
    def _read_dev_records(cls) -> list:
        return cls.__read_json_lines('dev.json')

    @classmethod
    def _read_test_records(cls) -> list:
        return cls.__read_json_lines('test.json')

    @classmethod
    def _process_train_data(cls, append_dev: bool) -> List[Dict[str, Any]]:
        """Mine the training split into the per-idiom example tables.

        Fills __idiom_meaning, __idiom_examples, __idiom_examples_token and
        __sorted_idiom_examples.  Token arrays are cached as JSON under
        cache_dir() so later runs skip tokenization.

        :param append_dev: also mine the dev split for examples.
        :return: always [] — this task draws shots from the tables above
            rather than returning training samples.
        """
        Log('processing training data')
        cls.__get_idiom_meaning()

        records = cls.__read_json_lines('train.json')
        with open(join(cls.data_dir(), 'train_answer.json'), 'r',
                  encoding='utf8') as file:
            cls.__answer = json.loads(file.read())

        if append_dev:
            records.extend(cls.__read_json_lines('dev.json'))
            with open(join(cls.data_dir(), 'dev_answer.json'), 'r',
                      encoding='utf8') as file:
                cls.__answer.update(json.loads(file.read()))

        cache_loaded = False
        cache = None
        idiom_examples_token_cache_path = join(
            cls.cache_dir(), 'idiom_examples_token.json')
        if exists(idiom_examples_token_cache_path):
            Log(f'found examples token cache: '
                f'{idiom_examples_token_cache_path}')
            with open(idiom_examples_token_cache_path, 'r',
                      encoding='utf8') as file:
                cache = json.loads(file.read())
            cache_loaded = True
            Log('loaded examples token cache')

        if cls.tokenize_processes() > 1 and not cache_loaded:
            # No cache on disk: tokenize every completed passage in
            # parallel and keep the result as an in-memory cache.
            Log(f'multiprocessing={cls.tokenize_processes()}'
                f' compute token, prepare texts')
            mt = MultiprocessingTokenizer(
                tokenizer,
                processes=cls.tokenize_processes())

            prompts = []
            for record in tqdm.tqdm(records, disable=Log.log_level() <= 0):
                prompts.extend(cls._make_prompt(record, False))
            Log(f'texts prepared, computing token:')
            tokens = mt.tokenize(prompts, cls.tokenize_batch(),
                                 verbose=Log.log_level() <= 0)
            assert len(tokens) == len(prompts)
            cache = {}
            cache_loaded = True
            for prompt, token in zip(prompts, tokens):
                cache[prompt] = token

        for record in tqdm.tqdm(records, disable=Log.log_level() <= 0):
            candidates: List[str] = record['candidates']
            contents: List[str] = record['content']
            for content in contents:
                blanks = cls.__blank_pattern.findall(content)
                assert len(blanks) > 0
                complete = content
                sample_of = set()
                for blank in blanks:
                    assert blank in cls.__answer.keys()
                    correct_idiom = candidates[cls.__answer[blank]]
                    complete = complete.replace(blank, correct_idiom)
                    sample_of.add(correct_idiom)
                if cache_loaded:
                    # _make_prompt builds `complete` the same way, so the
                    # cache is keyed by the completed passage text.
                    cls.__idiom_examples_token[complete] = \
                        np.array(cache[complete])
                else:
                    cls.__idiom_examples_token[complete] = tokenizer.tokenize(
                        complete)
                for idiom in sample_of:
                    cls.__idiom_examples.setdefault(idiom, []).append(
                        complete)

        if not exists(idiom_examples_token_cache_path):
            Log(
                f'saving examples token to: '
                f'{idiom_examples_token_cache_path}')
            save_dict = {}
            for k, v in cls.__idiom_examples_token.items():
                save_dict[k] = v.astype('int').tolist()
            # BUG FIX: the cache directory may not exist on a fresh run;
            # create it before writing instead of crashing on open().
            os.makedirs(cls.cache_dir(), exist_ok=True)
            with open(idiom_examples_token_cache_path, 'w', encoding='utf8') \
                    as file:
                file.write(json.dumps(save_dict, ensure_ascii=False))

        Log('sorting idiom examples by token length')
        for idiom, examples in cls.__idiom_examples.items():
            tokens = [cls.__idiom_examples_token[each] for each in examples]
            cls.__sorted_idiom_examples[idiom] = \
                sorted(tokens, key=len)

        return []

    @classmethod
    def _make_prompt(cls, record, mask_target: bool) -> List[str]:
        """Return each passage with every blank filled by its answer.

        :param mask_target: unused in this implement; kept for interface
            compatibility with the base class.
        """
        res = []
        candidates: List[str] = record['candidates']
        contents: List[str] = record['content']
        for content in contents:
            blanks = cls.__blank_pattern.findall(content)
            assert len(blanks) > 0
            prompt = content
            for blank in blanks:
                assert blank in cls.__answer.keys()
                prompt = prompt.replace(
                    blank, candidates[cls.__answer[blank]])
            res.append(prompt)
        return res

    def get_task_maker(self) -> TaskMaker:
        """Task maker that scores candidates by perplexity, with few-shot
        prompts drawn from a CHIDShotsGenerator over this implement."""
        return PerplexityPromptShotsChoiceMakingTaskMaker(
            token_limit=context_limit,
            tokenizer=tokenizer,
            shots_generator=CHIDShotsGenerator(implement=self),
        )

    @classmethod
    def data_dir(cls) -> str:
        # Dataset files live in the parent of this file's directory.
        return abspath(join(__file__, '../..'))

    @classmethod
    def cache_dir(cls) -> str:
        # Token caches live next to this file, under 'cache/'.
        return join(cls.work_dir(), 'cache')

    @classmethod
    def work_dir(cls) -> str:
        # The directory containing this source file.
        return abspath(join(__file__, '..'))

    @classmethod
    def _finalize(cls):
        """Release all mined tables so a later run starts clean."""
        cls.__idiom_meaning.clear()
        cls.__idiom_examples.clear()
        cls.__sorted_idiom_examples.clear()
        cls.__idiom_examples_token.clear()
        cls.__answer.clear()

    @classmethod
    def __get_idiom_meaning(cls):
        """Load idiom definitions and their token arrays.

        Definitions come from 'idiomDict_modified.json'; tokens are cached
        as JSON under cache_dir().
        """
        Log('reading idiom meaning')
        with open(join(cls.data_dir(), 'idiomDict_modified.json'), 'r',
                  encoding='utf8') as file:
            idiom_meaning_dict = json.loads(file.read())
        cache = None
        idiom_meaning_token_cache_path = join(
            cls.cache_dir(), 'idiom_meaning_token.json')
        if exists(idiom_meaning_token_cache_path):
            with open(idiom_meaning_token_cache_path, 'r', encoding='utf8') \
                    as file:
                cache = json.loads(file.read())

        if cls.tokenize_processes() == 1 or cache is not None:
            for idiom, meaning in tqdm.tqdm(
                    idiom_meaning_dict.items(), disable=Log.log_level() <= 0):
                meaning = f'{idiom}：{meaning}'
                token = tokenizer.tokenize(meaning) if cache is None else \
                    np.array(cache[idiom])
                cls.__idiom_meaning[idiom] = (meaning, token)
        else:
            idiom_list = []
            meaning_list = []
            for idiom, meaning in idiom_meaning_dict.items():
                meaning_list.append(f'{idiom}：{meaning}')
                idiom_list.append(idiom)
            mt = MultiprocessingTokenizer(
                tokenizer,
                processes=cls.tokenize_processes())
            token_list = mt.tokenize(
                meaning_list, cls.tokenize_batch(), verbose=Log.log_level() <= 0
            )
            for i in range(len(meaning_list)):
                # Wrap in np.array for consistency with the cached path,
                # so the .tolist() in the save step below is guaranteed
                # to exist.
                cls.__idiom_meaning[idiom_list[i]] = (
                    meaning_list[i], np.array(token_list[i]))

        if cache is None:
            save_dict = {}
            Log('saving idioms meaning token')
            for idiom, meaning in cls.__idiom_meaning.items():
                save_dict[idiom] = np.asarray(meaning[1]).tolist()
            # BUG FIX: the cache directory may not exist on a fresh run;
            # create it before writing instead of crashing on open().
            os.makedirs(cls.cache_dir(), exist_ok=True)
            with open(idiom_meaning_token_cache_path, 'w',
                      encoding='utf8') as file:
                file.write(json.dumps(save_dict, ensure_ascii=False))
            Log('saved idiom meaning token')