import abc
from typing import List
import numpy as np
import tqdm
import json
from threading import Thread, Condition

from backend.experiment.framework.tokenizer import BaseTokenizer
from backend.experiment.framework.data import Sample, PromptSample
from backend.experiment.framework.perplexity_calculator import \
    PerplexityCalculator
from backend.experiment.framework.lmmodel import LMModel, GenerateLMModel
from backend.experiment.framework.util import ListProgressVisibleMultiprocessing
from backend.experiment.framework.log import Log


class Task:
    """Minimal unit of work, identified by an integer id."""

    def __init__(self, task_id: int):
        # Name-mangled so the id stays read-only from outside the class.
        self.__id = task_id

    @property
    def task_id(self) -> int:
        """The identifier this task was created with."""
        return self.__id

    def __repr__(self):
        # One JSON object per repr so tasks round-trip through
        # TaskMaker.save_tasks / load_tasks line by line.
        payload = {
            'class': type(self).__name__,
            'task_id': self.task_id,
        }
        return json.dumps(payload, ensure_ascii=False)


class TaskHandler(metaclass=abc.ABCMeta):
    """Interface for objects that execute one Task and return a result."""

    @abc.abstractmethod
    def handle(self, task: Task, **kwargs):
        """
        Process a single task. Implementations must be thread-safe.
        :param task: the task to process
        :return: implementation-defined result payload
        """


class TaskMaker(metaclass=abc.ABCMeta):
    """Builds Task objects from Samples and round-trips them to disk.

    Subclasses implement ``_make_task`` to turn one Sample into one Task;
    this base class handles batching, optional multiprocessing and
    (de)serialization.
    """

    def __init__(
            self,
            making_task_type: type,
            token_limit: int,
            tokenizer: BaseTokenizer,
    ):
        # load_tasks rebuilds tasks by calling this type with the fields
        # of the saved JSON repr, so it must be a Task subclass.
        assert issubclass(making_task_type, Task)
        self.__task_type = making_task_type
        self.__token_limit = token_limit
        self.__tokenizer = tokenizer

    @property
    def tokenizer(self) -> BaseTokenizer:
        return self.__tokenizer

    @property
    def token_limit(self) -> int:
        return self.__token_limit

    @staticmethod
    def save_tasks(tasks: List[Task], path: str) -> None:
        """Write one task repr per line; a None entry is saved as 'None'."""
        with open(path, 'w', encoding='utf8') as file:
            for each in tasks:
                file.write(str(each) + '\n')

    def load_tasks(self, path: str) -> List[Task]:
        """Reload tasks written by ``save_tasks``, preserving None gaps.

        Fix: a line marks a missing task only when it is exactly
        ``None``. The previous substring test (``'None' not in line``)
        silently turned any real task whose JSON contained the text
        "None" into a gap.
        """
        objs = []
        with open(path, 'r', encoding='utf8') as file:
            for each in file:
                if each.strip() == 'None':
                    objs.append(None)
                else:
                    objs.append(json.loads(each))
        res = []
        for each in objs:
            if each is None:
                res.append(None)
            else:
                # 'class' is repr metadata, not a constructor argument.
                each.pop('class')
                res.append(self.__task_type(**each))
        return res

    def make_tasks(
            self,
            samples: List[Sample],
            insert_start_token: bool,
            verbose: bool = False,
            multiprocessing: int = 1,
            multiprocessing_batch: int = 128,
            **kwargs
    ) -> List[Task]:
        """Convert samples to tasks, serially or with worker processes.

        :param samples: input samples; None entries are skipped in the
            serial path.
        :param insert_start_token: forwarded to ``_make_task``.
        :param verbose: show a progress bar.
        :param multiprocessing: number of worker processes (1 = serial).
        :param multiprocessing_batch: batch size per worker dispatch.
        :return: one Task per processed sample.
        """
        assert multiprocessing > 0
        if multiprocessing == 1:
            samples = tqdm.tqdm(samples, disable=not verbose)
            tasks = [
                self._make_task(
                    sample, insert_start_token=insert_start_token,
                    **kwargs)
                for sample in samples if sample is not None
            ]
        else:
            # NOTE(review): unlike the serial path, None samples are not
            # filtered here — presumably the processor copes with them;
            # confirm before relying on mixed inputs.
            processor = ListProgressVisibleMultiprocessing(
                multiprocessing,
                self._make_task
            )
            kwargs['insert_start_token'] = insert_start_token
            tasks = processor.process(
                samples, multiprocessing_batch,
                kwargs=kwargs,
                verbose=verbose
            )
        return tasks

    @abc.abstractmethod
    def _make_task(self, sample: Sample, insert_start_token: bool, **kwargs) \
            -> Task:
        """Build one Task from one Sample."""


class Candidate:
    """A prompt (token ids) paired with the content it represents.

    Fix: the old parameter annotation ``np.ndarray or list`` evaluated
    at runtime to plain ``np.ndarray`` (``or`` between classes returns
    the first truthy operand), so it never expressed a union; any
    sequence accepted by ``np.array`` is valid and is normalized to an
    ndarray here.

    :param prompt: token ids as ``np.ndarray`` or array-like.
    :param content: arbitrary JSON-serializable payload.
    """

    def __init__(self, prompt, content):
        # Normalize so .prompt is always an ndarray.
        if not isinstance(prompt, np.ndarray):
            prompt = np.array(prompt)
        self.__prompt = prompt
        self.__content = content

    @property
    def prompt(self) -> np.ndarray:
        return self.__prompt

    @property
    def content(self):
        return self.__content

    def __repr__(self):
        # JSON form is consumed by task (de)serialization.
        return json.dumps({
            'class': type(self).__name__,
            'prompt': self.prompt.tolist(),
            'content': self.content,
        }, ensure_ascii=False)

    def __str__(self):
        return self.__repr__()


class PerplexityCandidate(Candidate):
    """Candidate with an optional [ppl_begin, ppl_end) scoring window."""

    def __init__(
            self, prompt: np.ndarray, content,
            ppl_begin: int = 0, ppl_end: int = None):
        super().__init__(prompt, content)
        # Coerce to int (JSON round-trips may yield floats) but keep None.
        self.__ppl_begin = None if ppl_begin is None else int(ppl_begin)
        self.__ppl_end = None if ppl_end is None else int(ppl_end)

    @property
    def ppl_begin(self) -> int:
        return self.__ppl_begin

    @property
    def ppl_end(self) -> int:
        return self.__ppl_end

    def __repr__(self):
        # Extend the base JSON repr with the scoring window.
        payload = json.loads(super().__repr__())
        payload['class'] = type(self).__name__
        payload['ppl_begin'] = self.ppl_begin
        payload['ppl_end'] = self.ppl_end
        return json.dumps(payload, ensure_ascii=False)


class PerplexityChoiceMakingTask(Task):
    """Task whose answer is chosen by comparing candidate perplexities."""

    def __init__(
            self, task_id: int,
            candidates: List[PerplexityCandidate or dict]
    ):
        super().__init__(task_id)

        # Accept either live candidate objects or their JSON dict form,
        # so this constructor also works for tasks reloaded from disk.
        self.__candidates = []
        for item in candidates:
            if isinstance(item, dict):
                item.pop('class')
                item = PerplexityCandidate(**item)
            self.__candidates.append(item)

    @property
    def candidates(self) -> List[PerplexityCandidate]:
        return self.__candidates

    def __repr__(self):
        payload = json.loads(super().__repr__())
        payload['class'] = type(self).__name__
        payload['candidates'] = [
            json.loads(str(candidate)) for candidate in self.candidates
        ]
        return json.dumps(payload, ensure_ascii=False)


class PerplexityChoiceMakingTaskViewer:
    """Interactive console viewer for saved PerplexityChoiceMakingTasks."""

    @staticmethod
    def read(file_path: str):
        # Lazy import: the project tokenizer is only needed when viewing.
        from backend.experiment.tokenizer import tokenizer
        with open(file_path, 'r', encoding='utf8') as file:
            records = []
            for line in file:
                # Lines reading 'None' mark missing tasks; skip them.
                if 'None' in line:
                    continue
                records.append(json.loads(line))
        for each in records:
            print(f'task_id:{each["task_id"]}\n')
            for candidate in each['candidates']:
                print(f'candidate: {candidate["content"]}:\n'
                      f'{tokenizer.token_texts(np.array(candidate["prompt"]))}'
                      f'\nppl_begin: {candidate["ppl_begin"]}\t'
                      f'ppl_end: {candidate["ppl_end"]}')
            print('=' * 60)
            input('enter to view next')


class DirectPromptTask(Task):
    """Task carrying a ready-to-run prompt as an array of token ids."""

    def __init__(self, task_id, prompt: np.ndarray):
        super().__init__(task_id)
        # Normalize so .prompt is always an ndarray.
        self.__prompt = (
            prompt if isinstance(prompt, np.ndarray) else np.array(prompt)
        )

    @property
    def prompt(self) -> np.ndarray:
        return self.__prompt

    def __repr__(self):
        payload = json.loads(super().__repr__())
        payload['class'] = type(self).__name__
        payload['prompt'] = self.prompt.tolist()
        return json.dumps(payload, ensure_ascii=False)


class DirectPromptTaskViewer:
    """Interactive console viewer for saved DirectPromptTasks."""

    @staticmethod
    def read(file_path: str):
        # Lazy import: the project tokenizer is only needed when viewing.
        from backend.experiment.tokenizer import tokenizer
        with open(file_path, 'r', encoding='utf8') as file:
            records = []
            for line in file:
                # Lines reading 'None' mark missing tasks; skip them.
                if 'None' in line:
                    continue
                records.append(json.loads(line))
        for each in records:
            print(f'task_id:{each["task_id"]}')
            print(f'prompt:{tokenizer.token_texts(np.array(each["prompt"]))}')
            print('=' * 60)
            input('enter to view next')


class ClassificationTask(DirectPromptTask):
    """Prompt task plus the label vocabulary used to decode its answer."""

    def __init__(self, task_id, prompt: np.ndarray, label_map: List[str]):
        super().__init__(task_id, prompt)
        self.__label_map = label_map

    @property
    def label_map(self) -> List[str]:
        return self.__label_map

    def __repr__(self):
        payload = json.loads(super().__repr__())
        payload['class'] = type(self).__name__
        payload['label_map'] = self.label_map
        return json.dumps(payload, ensure_ascii=False)


class PromptPerplexityTaskHandler(TaskHandler):
    """Scores a single prompt by its model perplexity."""

    def __init__(self, model: LMModel):
        self.__calculator = PerplexityCalculator(model)

    def handle(self, task: DirectPromptTask, **kwargs):
        calculator = self.__calculator
        inputs = calculator.model.format_inputs(task.prompt)
        return {'perplexity': calculator.perplexity(inputs)}


class PromptPerplexityTaskMaker(TaskMaker):
    """TaskMaker producing DirectPromptTasks from PromptSamples."""

    def __init__(self, token_limit: int, tokenizer: BaseTokenizer):
        super().__init__(
            making_task_type=DirectPromptTask,
            token_limit=token_limit,
            tokenizer=tokenizer
        )

    def _make_task(self, sample: PromptSample, insert_start_token: bool,
                   **kwargs) -> DirectPromptTask:
        prompt = self.tokenizer.tokenize(sample.prompt)
        if insert_start_token:
            # Prepend the sample-start marker before the tokenized prompt.
            prompt = np.concatenate(
                [self.tokenizer.sample_start_token, prompt])
        return DirectPromptTask(
            task_id=sample.sample_id,
            prompt=prompt
        )


class PerplexityChoiceMakingTaskHandler(TaskHandler, metaclass=abc.ABCMeta):
    """Chooses the candidate whose prompt has the lowest perplexity."""

    def handle(self, task: PerplexityChoiceMakingTask, **kwargs):
        candidates = task.candidates
        perplexities = self._get_choices_perplexity(candidates)
        assert len(perplexities) == len(candidates)
        best = int(np.argmin(perplexities))
        return {
            'index': best,
            'content': candidates[best].content,
            'perplexities': perplexities.tolist()
        }

    @abc.abstractmethod
    def _get_choices_perplexity(
            self, choices: List[PerplexityCandidate]
    ) -> np.ndarray:
        """Return one perplexity per candidate, in candidate order."""


class SingleThreadPerplexityChoiceMakingTaskHandler(
    PerplexityChoiceMakingTaskHandler
):
    """Scores every candidate sequentially on the calling thread."""

    def __init__(self, model: LMModel):
        self.__calculator = PerplexityCalculator(model)

    def _get_choices_perplexity(
            self, choices: List[PerplexityCandidate]
    ) -> np.ndarray:
        calculator = self.__calculator
        return np.array([
            calculator.perplexity(
                calculator.model.format_inputs(choice.prompt),
                begin=choice.ppl_begin, end=choice.ppl_end
            )
            for choice in choices
        ])


class MultiThreadPerplexityChoiceMakingTaskHandler(
    PerplexityChoiceMakingTaskHandler
):
    """Scores candidates concurrently with a pool of worker threads.

    A shared task queue feeds daemon workers; each worker computes one
    candidate's perplexity and pushes ``(index, ppl)`` onto a result
    queue that the dispatching thread drains.

    NOTE(review): the queues and candidate index space are shared per
    instance, so concurrent calls to ``handle`` on the SAME instance
    would interleave indices and results — confirm callers serialize
    calls per instance.
    """

    def __init__(self, model: LMModel, thread_num: int):
        self.__calculator = PerplexityCalculator(model)
        # Condition/list pairs acting as minimal blocking queues.
        self.__task_cond = Condition()
        self.__task_queue = []
        self.__result_cond = Condition()
        self.__result_queue = []
        # Daemon threads die with the process; there is no join/stop.
        self.__threads = [
            Thread(target=self._task_thread_main, args=(i,), daemon=True)
            for i in range(thread_num)
        ]

        for each in self.__threads:
            each.start()

    def _get_choices_perplexity(
            self, choices: List[PerplexityCandidate]
    ) -> np.ndarray:
        """Dispatch all choices to the workers and gather their results.

        :param choices: candidates to score.
        :return: perplexities aligned with the order of ``choices``.
        """
        receiving_index_set = set()
        ppls = []
        Log(f'MultiThreadPerplexityChoiceMakingTaskHandler-'
            f'main-thread set choice task, choices len:{len(choices)}', -1)
        with self.__task_cond:
            # Enqueue every candidate under the lock, then wake workers.
            for i, choice in enumerate(choices):
                ppls.append(0)  # placeholder, overwritten on completion
                receiving_index_set.add(i)
                self.__task_queue.append(
                    (i, choice.prompt, choice.ppl_begin, choice.ppl_end)
                )
            self.__task_cond.notify_all()

        while len(receiving_index_set) > 0:
            with self.__result_cond:
                # Standard condition-variable wait loop (handles spurious
                # wakeups by re-checking the predicate).
                while len(self.__result_queue) == 0:
                    Log(f'MultiThreadPerplexityChoiceMakingTaskHandler-'
                        f'main-thread wait result', -1)
                    self.__result_cond.wait()
                while len(self.__result_queue) > 0:
                    index, ppl = self.__result_queue.pop(0)
                    Log(f'MultiThreadPerplexityChoiceMakingTaskHandler-'
                        f'main-thread got choice {index} result '
                        f'ppl: {ppl}', -1)
                    ppls[index] = ppl
                    receiving_index_set.remove(index)
        Log(f'MultiThreadPerplexityChoiceMakingTaskHandler-'
            f'main-thread finished '
            f'ppls: {ppls}', -1)
        return np.array(ppls)

    def _task_thread_main(self, thread_id: int):
        """Worker loop: pop a task, compute its perplexity, post the result.

        :param thread_id: worker index, used only for log messages.
        """
        Log(f'MultiThreadPerplexityChoiceMakingTaskHandler-thread-{thread_id} '
            f'ready', -1)
        while True:
            with self.__task_cond:
                while len(self.__task_queue) == 0:
                    Log(f'MultiThreadPerplexityChoiceMakingTaskHandler-'
                        f'thread-{thread_id} wait choice task', -1)
                    self.__task_cond.wait()
                index, prompt, begin, end = self.__task_queue.pop(0)
                Log(f'MultiThreadPerplexityChoiceMakingTaskHandler-'
                    f'thread-{thread_id} got choice {index}', -1)
            # Heavy computation happens outside the lock so workers overlap.
            inputs = self.__calculator.model.format_inputs(prompt)
            Log(f'MultiThreadPerplexityChoiceMakingTaskHandler-'
                f'thread-{thread_id} calculator handling choice {index}', -1)
            ppl = self.__calculator.perplexity(inputs, begin=begin, end=end)
            Log(f'MultiThreadPerplexityChoiceMakingTaskHandler-'
                f'thread-{thread_id} calculator handled choice {index}, '
                f'ppl: {ppl}', -1)
            with self.__result_cond:
                self.__result_queue.append((index, ppl))
                self.__result_cond.notify_all()


class GenerativeClassificationTaskHandler(TaskHandler):
    """Classifies by constrained greedy decoding over label token sequences.

    Entries of ``label_map`` are indexed per position and used to index
    model outputs, so they are effectively sequences of token ids
    (despite the ``List[str]`` annotation — TODO confirm). A prefix trie
    (the "choice chain") is built over them; decoding follows the most
    probable trie branch until a single label remains.
    """

    def __init__(
            self, model: LMModel, verb_size: int, label_map: List[str]
    ):
        self.__model = model
        self.__label_map = label_map
        # verb_size is stored and exposed but not used in this class —
        # presumably consumed by callers or subclasses; confirm.
        self.__verb_size = verb_size
        # Trie root: maps first token id -> (resolved label or None, subtree).
        _, self.__choice_chain = self.__build_choice_chain(
            range(len(label_map)), 0
        )

    @property
    def model(self) -> LMModel:
        return self.__model

    @property
    def verb_size(self) -> int:
        return self.__verb_size

    @property
    def label_map(self) -> List[str]:
        return self.__label_map

    def handle(self, task: ClassificationTask, **kwargs) -> int:
        """Decode the prompt and return the winning label's index.

        :param task: classification task carrying the prompt.
        :return: index into ``label_map`` of the decoded label.
        """
        assert len(kwargs) == 0
        inputs = self.model.format_inputs(task.prompt)
        # (label-or-None, children) pair; start at the trie root.
        choices = (None, self.__choice_chain)
        while True:
            outputs = self.model.predict(inputs)
            assert len(outputs.shape) == 1
            if np.abs(np.sum(outputs) - 1) >= 1e-3:
                # Outputs do not sum to 1 (i.e. they are logits):
                # softmax-normalize into probabilities.
                out_exps = np.exp(outputs)
                outputs = out_exps / np.sum(out_exps)

            if choices[0] is None:
                # No label resolved yet: must choose among the children.
                alternative = [each for each in choices[1].keys()]
                outputs = outputs[alternative]
            else:
                if len(choices[1]) < 1:
                    return choices[0]
                # Slice out the outputs of all alternative next tokens.
                alternative = [each for each in choices[1].keys()]
                all_choices = float(len(alternative) + 1)
                outputs = outputs[alternative]
                # Keep decoding only if the mass on continuing beats the
                # uniform share of stopping at the resolved label.
                continue_predict = np.sum(outputs) >= 1 / all_choices
                if not continue_predict:
                    return choices[0]
            next_token_id = alternative[int(np.argmax(outputs))]
            # Append the chosen token id to the model input along the
            # last axis.
            next_token_id_array = np.array(next_token_id)
            next_token_id_array_shape = [
                -1 for _ in range(len(inputs.shape) - 1)]
            next_token_id_array_shape.append(1)
            next_token_id_array = np.reshape(
                next_token_id_array, next_token_id_array_shape)
            inputs = np.append(inputs, next_token_id_array, axis=-1)
            choices = choices[1][next_token_id]

            if choices[0] is not None and len(choices[1]) == 0:
                # Leaf reached: the label is uniquely determined.
                return choices[0]

    def __build_choice_chain(self, label_choices, step: int) -> \
            (int or None, dict):
        """Recursively build the prefix trie for ``label_choices``.

        :param label_choices: label indices still compatible with the
            tokens consumed so far.
        :param step: current token position within each label sequence.
        :return: ``(label, children)`` — ``label`` is the index whose
            token sequence ends exactly at ``step`` (or None), and
            ``children`` maps next token id -> nested ``(label, children)``.
        """
        res_label = None
        res_dict = {}
        step_choices = {}
        for label in label_choices:
            label_tokens = self.label_map[label]
            if step < len(label_tokens):
                token_id = label_tokens[step]
                if token_id not in step_choices.keys():
                    step_choices[token_id] = []
                step_choices[token_id].append(label)
            else:
                # At most one label may terminate at a given prefix.
                assert res_label is None
                res_label = label
        for token_id, choices in step_choices.items():
            if len(choices) <= 1:
                assert len(choices) == 1
                res_dict[token_id] = (choices[0], {})
            else:
                res_dict[token_id] = self.__build_choice_chain(
                    choices, step + 1)
        return res_label, res_dict


class GenerationTaskHandler(TaskHandler, metaclass=abc.ABCMeta):
    """Runs free-form generation from a prompt task and decodes the output."""

    def __init__(
            self, max_generate_count: int,
            model: GenerateLMModel,
            tokenizer: BaseTokenizer
    ):
        self.__max_generate_count = max_generate_count
        self.__model = model
        self.__tokenizer = tokenizer

    @property
    def max_generate_count(self) -> int:
        return self.__max_generate_count

    def handle(self, task: DirectPromptTask, **kwargs):
        # Cap generation so prompt + output (plus one reserved slot)
        # stays within the model's context window.
        budget = min(
            self.max_generate_count,
            self.__model.context_length - len(task.prompt) - 1
        )
        generated = self.__model.generate(task.prompt, budget, **kwargs)
        return {
            'generated': self.__tokenizer.token_texts(generated)
        }
