import os
from os.path import join, exists
import json
import tqdm
import abc
from enum import Enum
from typing import Set, List, Dict, Any
import numpy as np
import random
from backend.experiment.framework.data import Sample, \
    PromptClassificationSample, \
    PromptChoiceSample
from backend.experiment.framework.task import Task, TaskHandler, \
    SingleThreadPerplexityChoiceMakingTaskHandler, \
    MultiThreadPerplexityChoiceMakingTaskHandler, \
    GenerationTaskHandler
from backend.experiment.framework.implement import BaseImplement
from backend.experiment.framework.log import Log
from backend.experiment.framework.tokenizer import BaseTokenizer, \
    MultiprocessingTokenizer
from backend.experiment.framework.shots_prompt.data import ShotsGenerator


class ShotMode(Enum):
    """Prompting regime: how many example shots go into each test prompt
    (consumed by ShotsPromptImplement.get_tasks)."""
    ZERO_SHOT = 0  # no examples prepended to the prompt
    ONE_SHOT = 1   # exactly one example (shots_limit = min_shots = 1)
    FEW_SHOT = 2   # two or more examples (min_shots forced to 2)


class ShotsPromptImplement(BaseImplement, metaclass=abc.ABCMeta):
    """Abstract base for shots-prompt (in-context learning) experiments.

    Concrete subclasses supply the dataset plumbing (``_read_train_records``,
    ``_read_dev_records``, ``_read_test_records``, ``_make_prompt``,
    ``_make_test_sample``, ``cache_dir``, ``data_dir``); this base tokenizes
    the training examples once (with an on-disk JSON-lines cache), builds the
    test samples, and exposes the examples both length-sorted and shuffled
    for shot selection.

    NOTE(review): all experiment state lives in class attributes set by
    ``_initialize``, so at most one configuration per concrete class can be
    active at a time. ``__init__`` forwards its arguments to
    ``BaseImplement`` — presumably that routes them into ``_initialize``;
    confirm in BaseImplement.
    """

    # Tokenized training examples, ascending by token length
    # (consumed by length-aware shot generators such as CommonShotsGenerator).
    __sorted_example_tokens: List[Dict[str, Any]] = []
    # The same examples in a seeded-shuffled order.
    __shuffle_example_tokens: List[Dict[str, Any]] = []
    # Parallelism / batching knobs; defaults are overwritten by _initialize.
    __make_task_processes = 16
    __make_task_batch = 128
    __tokenize_processes = 16
    __tokenize_batch = 128
    # Tokenizer instance stored by _initialize (a BaseTokenizer).
    __tokenizer = None
    # Evaluation split turned into Sample objects by _process_test_data.
    __test_samples: List[Sample] = []
    # Seed for the random / numpy RNGs; also derives per-sample seeds.
    __seed = 42
    __random_seed_per_sample = True

    def __init__(
            self,
            tokenizer: BaseTokenizer,
            test_dev: bool = False,
            make_task_processes: int = 16,
            make_task_batch: int = 128,
            tokenize_processes: int = 16,
            tokenize_batch: int = 128,
            seed: int = 42,
            random_seed_per_sample: bool = True,
    ):
        """Forward the full configuration to BaseImplement unchanged.

        :param tokenizer: tokenizer used to encode example prompts.
        :param test_dev: if True evaluate on the dev split (and keep it out
            of the example pool); otherwise dev is appended to the examples.
        :param make_task_processes: process count for task building.
        :param make_task_batch: batch size for task building.
        :param tokenize_processes: process count for example tokenization.
        :param tokenize_batch: batch size for example tokenization.
        :param seed: global RNG seed.
        :param random_seed_per_sample: if True, every test sample gets its
            own reproducible random seed (see _process_test_data).
        """
        super().__init__(
            tokenizer=tokenizer,
            test_dev=test_dev,
            make_task_processes=make_task_processes,
            make_task_batch=make_task_batch,
            tokenize_processes=tokenize_processes,
            tokenize_batch=tokenize_batch,
            seed=seed,
            random_seed_per_sample=random_seed_per_sample,
        )

    def get_tasks(
            self, mode: ShotMode,
            cache_filename: str,
            insert_start_token: bool,
            shots_limit: 'int | None' = None,
            min_shots: 'int | None' = None,
            **kwargs
    ) -> List[Task]:
        """Build (or load from cache) the prompt tasks for one shot mode.

        :param mode: zero/one/few-shot regime; translated into shot-limit
            keyword arguments for the task maker.
        :param cache_filename: cache file name under cache_dir(); '.json' is
            appended when missing. NOTE(review): the cache key is the file
            name only — it must encode mode and kwargs, or a stale cache for
            a different configuration will be returned silently.
        :param insert_start_token: forwarded to the task maker.
        :param shots_limit: maximum shot count (FEW_SHOT only; must be >= 2
            there).
        :param min_shots: validated below but never forwarded — FEW_SHOT
            always forces min_shots=2. NOTE(review): confirm this is
            intentional.
        :return: the task list, freshly built or loaded from cache.
        """
        if shots_limit is not None:
            assert isinstance(shots_limit, int) and shots_limit >= 0
        if min_shots is not None:
            assert isinstance(min_shots, int) and min_shots >= 0
        Log(f'getting task, log_level: {Log.log_level()}')

        assert len(cache_filename) > 0
        if not cache_filename.endswith('.json'):
            cache_filename += '.json'
        cache_path = join(self.cache_dir(), cache_filename)

        # get_task_maker() is presumably provided by BaseImplement or the
        # concrete subclass — not defined in this class.
        task_maker = self.get_task_maker()
        if not exists(cache_path):
            Log(f'making tasks')
            # Translate the shot mode into task-maker keyword arguments.
            if mode == ShotMode.FEW_SHOT:
                kwargs['min_shots'] = 2
                if shots_limit is not None:
                    assert isinstance(shots_limit, int) and shots_limit >= 2
                    kwargs['shots_limit'] = shots_limit
            elif mode == ShotMode.ONE_SHOT:
                kwargs['shots_limit'] = 1
                kwargs['min_shots'] = 1
            elif mode == ShotMode.ZERO_SHOT:
                kwargs['shots_limit'] = 0
            tasks = task_maker.make_tasks(
                samples=self.get_test_samples(),
                insert_start_token=insert_start_token,
                multiprocessing=self.make_task_processes(),
                verbose=Log.log_level() <= 0,
                multiprocessing_batch=self.make_task_batch(),
                **kwargs
            )
            Log(f'saving tasks to {cache_path}')
            task_maker.save_tasks(tasks, cache_path)
        else:
            Log(f'load task from {cache_path}')
            tasks = task_maker.load_tasks(cache_path)
        return tasks

    @classmethod
    def random_seed_per_sample(cls) -> bool:
        # Whether _process_test_data assigns each sample its own seed.
        return cls.__random_seed_per_sample

    @classmethod
    def seed(cls) -> int:
        # Global experiment seed set in _initialize.
        return cls.__seed

    @classmethod
    def tokenizer(cls) -> BaseTokenizer:
        # Tokenizer stored by _initialize; None before initialization.
        return cls.__tokenizer

    @classmethod
    def make_task_processes(cls) -> int:
        return cls.__make_task_processes

    @classmethod
    def make_task_batch(cls) -> int:
        return cls.__make_task_batch

    @classmethod
    def tokenize_processes(cls) -> int:
        return cls.__tokenize_processes

    @classmethod
    def tokenize_batch(cls) -> int:
        return cls.__tokenize_batch

    @classmethod
    def get_test_samples(cls) -> List[Sample]:
        # Samples built by _process_test_data during _initialize.
        return cls.__test_samples

    @classmethod
    def cache_dir(cls) -> str:
        """Directory for token/task caches. Subclasses must override."""
        raise NotImplementedError

    @classmethod
    def data_dir(cls) -> str:
        """Directory holding the raw dataset files. Subclasses must override."""
        raise NotImplementedError

    @classmethod
    def get_sorted_example_tokens(cls) -> List[Dict[str, Any]]:
        # Examples ascending by token length (set in _initialize).
        return cls.__sorted_example_tokens

    @classmethod
    def get_shuffle_example_tokens(cls) -> List[Dict[str, Any]]:
        # Seed-shuffled copy of the example list (set in _initialize).
        return cls.__shuffle_example_tokens

    @classmethod
    def _initialize(
            cls,
            tokenizer: BaseTokenizer,
            test_dev: bool,
            make_task_processes: int,
            make_task_batch: int,
            tokenize_processes: int,
            tokenize_batch: int,
            seed: int,
            random_seed_per_sample: bool,
    ):
        """Seed the RNGs, load/tokenize the data and populate class state.

        Presumably invoked once per concrete class by BaseImplement during
        construction — confirm. Stores every knob on the class, seeds both
        `random` and `numpy.random`, ensures cache_dir() exists, then builds
        the example token list (in sorted and shuffled views) and the test
        samples.
        """
        Log(f'{cls.__name__} initializing with seed {seed}')
        cls.__tokenizer = tokenizer
        cls.__make_task_processes = make_task_processes
        cls.__make_task_batch = make_task_batch
        cls.__tokenize_processes = tokenize_processes
        cls.__tokenize_batch = tokenize_batch
        cls.__seed = seed
        # Seed both RNGs so example shuffling and per-sample seeds reproduce.
        random.seed(seed)
        np.random.seed(seed)
        cls.__random_seed_per_sample = random_seed_per_sample
        assert cls.make_task_processes() > 0

        if not exists(cls.cache_dir()):
            os.makedirs(cls.cache_dir())
        # Dev records join the example pool only when not evaluating on dev.
        example_tokens = cls._process_train_data(append_dev=not test_dev)
        cls.__test_samples = cls._process_test_data(test_dev=test_dev)
        cls.__sorted_example_tokens = \
            sorted(example_tokens, key=lambda x: len(x['token']))
        # copy() so shuffling leaves the sorted view untouched.
        cls.__shuffle_example_tokens = example_tokens.copy()
        random.shuffle(cls.__shuffle_example_tokens)
        Log(f'{cls.__name__} initialized')

    @classmethod
    def _make_test_sample(cls, record, made_sample_num: int) -> \
            'Sample | List[Sample]':
        """Turn one raw evaluation record into one or more Sample objects.

        :param record: one raw record from the test/dev reader.
        :param made_sample_num: number of samples produced so far (usable as
            the next sample id).
        """
        raise NotImplementedError

    @classmethod
    def _read_train_records(cls) -> list:
        """Load the raw training records. Subclasses must override."""
        raise NotImplementedError

    @classmethod
    def _read_dev_records(cls) -> list:
        """Load the raw dev records. Subclasses must override."""
        raise NotImplementedError

    @classmethod
    def _read_test_records(cls) -> list:
        """Load the raw test records. Subclasses must override."""
        raise NotImplementedError

    @classmethod
    def _make_prompt(cls, record, mask_target: bool) -> 'str | List[str]':
        """Render one record into prompt text (one string or several
        variants).

        :param mask_target: presumably masks the target/label out of the
            prompt — confirm in subclasses. _process_train_data always calls
            this with False.
        """
        raise NotImplementedError

    @classmethod
    def _process_train_data(cls, append_dev: bool) -> List[Dict[str, Any]]:
        """Tokenize the example (train [+ dev]) records, with a disk cache.

        Returns dicts with keys 'text', 'token', 'index' (record index) and
        'record'. NOTE(review): on a cache hit 'token' is a plain list (from
        JSON), while on a fresh run it is what the tokenizer returned (a
        numpy array, given the .tolist() call when saving) — downstream code
        must tolerate both.
        """
        Log('processing training data')
        example_tokens = []
        records = cls._read_train_records()

        if append_dev:
            records.extend(cls._read_dev_records())

        examples_token_cache_path = join(cls.cache_dir(), 'example_token.json')
        cache_loaded = False
        if exists(examples_token_cache_path):
            Log(f'found examples token cache: {examples_token_cache_path}')
            # JSON-lines file: one example dict per line.
            with open(examples_token_cache_path, 'r', encoding='utf8') as file:
                example_tokens = [json.loads(each) for each in file]
            cache_loaded = True
            Log('loaded examples token cache')

        if cls.tokenize_processes() > 1 and not cache_loaded:
            # TODO: the single-process case (tokenize_processes <= 1) is not
            # implemented — with no cache it silently returns an empty list.
            Log(f'multiprocessing={cls.tokenize_processes()}'
                f' compute token, prepare texts')
            mt = MultiprocessingTokenizer(
                cls.tokenizer(),
                processes=cls.__tokenize_processes)

            # _make_prompt may yield one prompt or several per record;
            # prompts_record_index maps each prompt back to its record.
            prompts = []
            prompts_record_index = []
            # NOTE(review): disable=log_level() <= 0 hides the progress bar
            # exactly when make_tasks runs verbose — polarity may be
            # inverted; confirm against Log's conventions.
            for i, record in tqdm.tqdm(
                    enumerate(records), disable=Log.log_level() <= 0):
                prompt = cls._make_prompt(record, False)
                if isinstance(prompt, list):
                    for _ in prompt:
                        prompts_record_index.append(i)
                    prompts.extend(prompt)
                else:
                    prompts.append(prompt)
                    prompts_record_index.append(i)
            Log(f'texts prepared, computing token:')
            tokens = mt.tokenize(prompts, cls.__tokenize_batch,
                                 verbose=Log.log_level() <= 0)
            assert len(tokens) == len(prompts)
            example_tokens.clear()
            for i in range(len(prompts)):
                example_tokens.append({
                    'text': prompts[i],
                    'token': tokens[i],
                    'index': prompts_record_index[i],
                    'record': records[prompts_record_index[i]]
                })

        if not exists(examples_token_cache_path):
            Log(f'saving examples token to: {examples_token_cache_path}')
            with open(examples_token_cache_path, 'w', encoding='utf8') as file:
                for each in example_tokens:
                    file.write(
                        json.dumps({
                            'text': each['text'],
                            # .tolist(): on this (fresh) path tokens are
                            # numpy arrays, not JSON-serializable as-is.
                            'token': each['token'].tolist(),
                            'index': each['index'],
                            'record': records[each['index']]
                        }, ensure_ascii=False) + '\n'
                    )
        return example_tokens

    @classmethod
    def _finalize(cls):
        """Release per-experiment state.

        NOTE(review): __shuffle_example_tokens and the stored tokenizer are
        not cleared here — confirm whether that is intentional.
        """
        Log(f'{cls.__name__} finalizing')
        cls.__test_samples.clear()
        cls.__sorted_example_tokens.clear()
        Log(f'{cls.__name__} finalized')

    @classmethod
    def _process_test_data(cls, test_dev: bool) -> List[Sample]:
        """Build Sample objects for the evaluation split.

        :param test_dev: read the dev split instead of the test split.
        :return: flat sample list; entries may be None (the seeding loop
            below tolerates None samples from _make_test_sample).
        """
        samples = []
        Log('processing test data')
        if test_dev:
            records = cls._read_dev_records()
        else:
            records = cls._read_test_records()

        for record in tqdm.tqdm(records, disable=Log.log_level() <= 0):
            # `record` is rebound here to the produced sample(s).
            record = cls._make_test_sample(record, len(samples))
            if isinstance(record, list):
                samples.extend(record)
            else:
                samples.append(record)
        if cls.random_seed_per_sample():
            # Re-seed so the per-sample seed sequence is reproducible and
            # independent of any RNG use during sample construction.
            random.seed(cls.seed())
            for sample in samples:
                if sample is not None:
                    sample.seed = random.randint(0, int(1e9))
        return samples


class CommonShotsGenerator(ShotsGenerator):
    """Random shot selector over examples sorted ascending by token length.

    Fixes two defects in the previous implementation:
    1. the halving loop (``mid = end // 2`` with no lower bound) was not a
       real binary search — it stopped at *some* fitting index, so fitting
       examples above that probe point could never be selected;
    2. it returned the position inside the filtered ``alternatives`` list
       instead of the index into the example list, which is what
       ``selected_shots`` stores (the sibling static generators return the
       example index).
    """

    def __init__(self, length_sorted_example_tokens: List[Dict[str, Any]]):
        # Must be sorted ascending by len(entry['token']); the binary
        # search in get_shot relies on that ordering.
        self.__example_tokens = length_sorted_example_tokens

    def get_shot(
            self,
            limit_remain: int,
            selected_shots: Set[int],
            **kwargs
    ) -> (np.ndarray, int) or None:
        """Pick a random unused example whose token length fits the budget.

        :param limit_remain: remaining token budget for one more shot.
        :param selected_shots: example indices already used for this prompt.
        :return: (token sequence, example index) or None when nothing fits.
        """
        # Binary search for the number of examples short enough to fit:
        # afterwards every index < fit_count has len(token) <= limit_remain.
        lo, hi = 0, len(self.__example_tokens)
        while lo < hi:
            mid = (lo + hi) // 2
            if len(self.__example_tokens[mid]['token']) <= limit_remain:
                lo = mid + 1
            else:
                hi = mid
        fit_count = lo
        if fit_count == 0:
            # Even the shortest example exceeds the remaining budget.
            return None
        alternatives = [
            i for i in range(fit_count) if i not in selected_shots
        ]
        if len(alternatives) == 0:
            return None
        # Uniform choice via the module-level RNG (seeded by the experiment
        # setup elsewhere).
        chosen = alternatives[random.randint(0, len(alternatives) - 1)]
        shot = self.__example_tokens[chosen]
        # Return the example index (not the position within `alternatives`)
        # so the caller can record it in selected_shots.
        return shot['token'], chosen


class CommonStaticShotsGenerator(ShotsGenerator):
    """Deterministic shot source: always yields the first not-yet-used
    example whose token sequence fits the remaining length budget."""

    def __init__(self, example_tokens: List[Dict[str, Any]]):
        self.__example_tokens = example_tokens

    def get_shot(
            self,
            limit_remain: int,
            selected_shots: Set[int],
            **kwargs
    ) -> (np.ndarray, int) or None:
        """Return (token sequence, example index) for the first unused,
        fitting example; None when every example is used or too long."""
        candidates = (
            (entry['token'], index)
            for index, entry in enumerate(self.__example_tokens)
            if index not in selected_shots
            and len(entry['token']) <= limit_remain
        )
        return next(candidates, None)


class CommonLabelBalanceStaticShotsGenerator(ShotsGenerator):
    """Deterministic shot source that balances labels: the label bucket to
    draw from rotates round-robin with the number of shots already chosen."""

    def __init__(self, labeled_example_tokens: List[List[Dict[str, Any]]]):
        assert len(labeled_example_tokens) > 0
        self.__labeled_example_tokens = labeled_example_tokens

    def get_shot(
            self, limit_remain: int, selected_shots: Set[int],
            **kwargs
    ) -> (np.ndarray, int) or None:
        """Return (token sequence, global example index) from the current
        label bucket, or None when nothing there fits / remains unused."""
        # The bucket for the next shot rotates with the shot count.
        bucket_index = len(selected_shots) % len(self.__labeled_example_tokens)
        # Global ids are bucket-local ids shifted past all earlier buckets.
        offset = sum(
            len(bucket)
            for bucket in self.__labeled_example_tokens[:bucket_index]
        )
        for local_id, entry in enumerate(
                self.__labeled_example_tokens[bucket_index]):
            global_id = local_id + offset
            if global_id in selected_shots:
                continue
            if len(entry['token']) <= limit_remain:
                return entry['token'], global_id
        # Every example in this bucket is either used or too long.
        return None


class CommonPromptClassificationSample(PromptClassificationSample):
    """Classification sample whose prompt contains a placeholder token that
    is substituted with each label's text when scoring."""

    def __init__(self, sample_id: int, target_mask_token: str, prompt: str):
        super().__init__(sample_id=sample_id, prompt=prompt)
        # Placeholder text to be replaced by label content.
        self.__mask_token = target_mask_token

    def fill_prompt_with_label_content(self, label_content: str) -> str:
        """Return the prompt with every occurrence of the mask token
        replaced by *label_content*."""
        filled_prompt = self.prompt.replace(self.__mask_token, label_content)
        return filled_prompt


class CommonGenerationImplement(ShotsPromptImplement, metaclass=abc.ABCMeta):
    """Shots-prompt experiment whose tasks are answered by free generation."""

    @classmethod
    def max_generate_count(cls) -> int:
        """Maximum tokens to generate per task. Subclasses must override."""
        raise NotImplementedError

    @classmethod
    def get_task_handler(cls) -> TaskHandler:
        """Build a generation handler bound to the shared model/tokenizer."""
        # Local imports — presumably keep the heavyweight model module out
        # of import time; confirm.
        from backend.experiment.model import model
        from backend.experiment.tokenizer import tokenizer
        handler = GenerationTaskHandler(
            max_generate_count=cls.max_generate_count(),
            model=model,
            tokenizer=tokenizer,
        )
        return handler


class CommonPerplexityClassificationImplement(
    ShotsPromptImplement, metaclass=abc.ABCMeta
):
    """Shots-prompt experiment scored by perplexity over a fixed label set.

    Subclasses define the label vocabulary (label_map) and how a raw record
    maps onto a label id (_get_record_label).
    """

    def __init__(
            self,
            tokenizer: BaseTokenizer,
            test_dev: bool = False,
            make_task_processes: int = 16,
            make_task_batch: int = 16,
            tokenize_processes: int = 16,
            tokenize_batch: int = 16,
            seed: int = 42,
    ):
        """Forward configuration to ShotsPromptImplement (note the smaller
        default batch sizes here: 16 instead of the base class's 128)."""
        ShotsPromptImplement.__init__(
            self,
            tokenizer=tokenizer,
            test_dev=test_dev,
            make_task_processes=make_task_processes,
            make_task_batch=make_task_batch,
            tokenize_processes=tokenize_processes,
            tokenize_batch=tokenize_batch,
            seed=seed
        )

    @classmethod
    def label_map(cls) -> List[str]:
        """Ordered label texts; list index == label id. Must override."""
        raise NotImplementedError

    @classmethod
    def inter_task_thread_num(cls) -> int:
        # Worker threads used inside one task; override to tune.
        return 8

    @classmethod
    def get_task_handler(cls) -> TaskHandler:
        """Choose a single- or multi-threaded perplexity handler."""
        # Local import — presumably keeps the model out of module import
        # time; confirm.
        from backend.experiment.model import model
        thread_num = cls.inter_task_thread_num()
        assert thread_num > 0
        if thread_num > 1:
            return MultiThreadPerplexityChoiceMakingTaskHandler(
                model, cls.inter_task_thread_num())
        return SingleThreadPerplexityChoiceMakingTaskHandler(model)

    @classmethod
    def classify_example_tokens(
            cls, example_tokens: List[Dict[str, Any]]) -> \
            List[List[Dict[str, Any]]]:
        """Partition tokenized examples into one bucket per label id."""
        buckets: List[List[Dict[str, Any]]] = [[] for _ in cls.label_map()]
        for entry in example_tokens:
            label = cls._get_record_label(entry['record'])
            assert label < len(buckets)
            buckets[label].append(entry)
        return buckets

    @classmethod
    def _get_record_label(cls, record) -> int:
        """Map a raw record to its label id (index into label_map())."""
        raise NotImplementedError


class CommonPerplexityChoiceMakingSample(PromptChoiceSample):
    """Choice-making sample whose prompt contains a placeholder token that
    is substituted with each candidate string when scoring."""

    def __init__(
            self, sample_id: int,
            target_mask_token: str,
            candidates: list, prompt: str,
            seed: int = 42
    ):
        super().__init__(
            sample_id=sample_id,
            candidates=candidates,
            prompt=prompt, seed=seed
        )
        # Placeholder text to be replaced by a candidate.
        self.__mask_token = target_mask_token

    def fill_prompt_with_candidate(self, candidate) -> str:
        """Return the prompt with every occurrence of the mask token
        replaced by *candidate*."""
        filled_prompt = self.prompt.replace(self.__mask_token, candidate)
        return filled_prompt


class CommonPerplexityChoiceMakingImplement(
    ShotsPromptImplement, metaclass=abc.ABCMeta
):
    """Shots-prompt experiment scored by perplexity over per-sample
    candidate choices."""

    def __init__(
            self,
            tokenizer: BaseTokenizer,
            test_dev: bool = False,
            make_task_processes: int = 16,
            make_task_batch: int = 16,
            tokenize_processes: int = 16,
            tokenize_batch: int = 16,
            seed: int = 42,
    ):
        """Forward configuration to ShotsPromptImplement (note the smaller
        default batch sizes here: 16 instead of the base class's 128)."""
        ShotsPromptImplement.__init__(
            self,
            tokenizer=tokenizer,
            test_dev=test_dev,
            make_task_processes=make_task_processes,
            make_task_batch=make_task_batch,
            tokenize_processes=tokenize_processes,
            tokenize_batch=tokenize_batch,
            seed=seed
        )

    @classmethod
    def inter_task_thread_num(cls) -> int:
        # Worker threads used inside one task; override to tune.
        return 8

    @classmethod
    def get_task_handler(cls) -> TaskHandler:
        """Choose a single- or multi-threaded perplexity handler."""
        # Local import — presumably keeps the model out of module import
        # time; confirm.
        from backend.experiment.model import model
        thread_num = cls.inter_task_thread_num()
        assert thread_num > 0
        if thread_num > 1:
            return MultiThreadPerplexityChoiceMakingTaskHandler(
                model, cls.inter_task_thread_num())
        return SingleThreadPerplexityChoiceMakingTaskHandler(model)