import abc
import numpy as np
from typing import List, Union
import random
from backend.experiment.framework.task import PerplexityCandidate, TaskMaker, \
    PerplexityChoiceMakingTask, Task, DirectPromptTask
from backend.experiment.framework.data import PromptSample, PromptChoiceSample, \
    PromptClassificationSample
from backend.experiment.framework.tokenizer import BaseTokenizer
from backend.experiment.framework.shots_prompt.data import ShotsGenerator


class ShotsPromptTaskMaker(TaskMaker, metaclass=abc.ABCMeta):
    """Task maker that prefixes each sample prompt with few-shot examples.

    Examples are drawn from a ShotsGenerator until either the token budget
    (``token_limit``) or an explicit ``shots_limit`` is exhausted; subclasses
    decide how a sample becomes a concrete Task.
    """

    def __init__(
            self,
            making_task_type: type,
            token_limit: int,
            tokenizer: BaseTokenizer,
            shots_generator: ShotsGenerator,
            first_prompt: np.ndarray = None
    ):
        """
        :param making_task_type: concrete Task subclass this maker produces.
        :param token_limit: hard cap on the final prompt length in tokens.
        :param tokenizer: tokenizer providing special tokens and tokenization.
        :param shots_generator: source of few-shot example token arrays.
        :param first_prompt: optional fixed token array placed before all shots
            (e.g. a task instruction) — not counted as a shot.
        """
        super().__init__(
            making_task_type=making_task_type,
            token_limit=token_limit,
            tokenizer=tokenizer)
        self.__shots_generator = shots_generator
        self.__first_prompt = first_prompt

    @property
    def first_prompt(self) -> np.ndarray:
        """Fixed leading token array, or None if not configured."""
        return self.__first_prompt

    @property
    def shots_generator(self) -> ShotsGenerator:
        """Generator that yields (shot_tokens, shot_id) pairs."""
        return self.__shots_generator

    @abc.abstractmethod
    def _make_task(
            self, sample: PromptSample,
            insert_start_token: bool,
            shots_limit: int = None,
            min_shots: int = None,
            **kwargs) -> Task:
        """Build a Task for *sample*; implemented by subclasses."""
        pass

    def _make_prompt_with_shots(
            self,
            prompt: np.ndarray,
            insert_start_token: bool,
            shots_limit: int = None,
            min_shots: int = None,
            seed: int = None,
            **kwargs
    ) -> np.ndarray:
        """Return *prompt* prefixed with as many shots as the budget allows.

        Layout: [start token?] [first_prompt?] (shot, sep)* prompt.
        Raises AssertionError when fewer than ``min_shots`` examples fit —
        callers treat that as "skip this sample".
        """
        if shots_limit is not None:
            assert isinstance(shots_limit, int) and shots_limit >= 0
        if seed is not None:
            # NOTE(review): seeds the *global* random module so shot selection
            # is reproducible per sample; presumably shots_generator draws
            # from `random` — confirm.
            random.seed(seed)
        sep = self.tokenizer.paragraph_sep_token
        # Budget left for shots: total limit minus one reserved token, the
        # sample prompt itself, and one trailing separator's worth of slack.
        budget = self.token_limit - 1 - len(prompt) - len(sep)
        pieces = []
        if insert_start_token:
            start = self.tokenizer.sample_start_token
            pieces.append(start)
            budget -= len(start)
        if self.first_prompt is not None:
            budget -= len(self.first_prompt)
            pieces.append(self.first_prompt)

        used_ids = set()
        n_shots = 0
        while (shots_limit is None or n_shots < shots_limit) and budget > 0:
            res = self.shots_generator.get_shot(
                budget, selected_shots=used_ids, **kwargs
            )
            if res is None:
                # Generator has no shot that fits the remaining budget.
                break
            shot, shot_id = res
            used_ids.add(shot_id)
            budget -= len(shot) + len(sep)
            pieces.extend((shot, sep))
            n_shots += 1
        if min_shots is not None:
            assert \
                isinstance(min_shots, int) and 0 <= min_shots <= n_shots, \
                f'can not get enough examples.' \
                f'prompt len: {len(prompt)}: \n' \
                f'{self.tokenizer.token_texts(prompt)}\n' \
                f'shots got: {n_shots}, ' \
                f'shots len: {sum(len(each) for each in pieces)}'
        pieces.append(prompt)
        result = np.concatenate(pieces, axis=0)
        assert len(result) <= self.token_limit, \
            f'{len(result)} > {self.token_limit}'
        return result


class PerplexityPromptShotsChoiceMakingTaskMaker(
    ShotsPromptTaskMaker,
    metaclass=abc.ABCMeta
):
    """Builds perplexity-based multiple-choice tasks.

    Each candidate answer is substituted into the sample's prompt template,
    prefixed with few-shot examples, and wrapped as a PerplexityCandidate so
    downstream code can pick the candidate with the lowest perplexity.
    """

    def __init__(
            self,
            token_limit: int,
            tokenizer: BaseTokenizer,
            shots_generator: ShotsGenerator,
            first_prompt: np.ndarray = None,
    ):
        super().__init__(
            token_limit=token_limit,
            tokenizer=tokenizer,
            shots_generator=shots_generator,
            first_prompt=first_prompt,
            making_task_type=PerplexityChoiceMakingTask
        )

    def _make_task(
            self, sample: PromptChoiceSample,
            insert_start_token: bool,
            only_calculate_last: bool = False,
            shots_limit: int = None,
            min_shots: int = None,
            **kwargs
    ) -> Union[PerplexityChoiceMakingTask, None]:
        """Create one task per sample, or None when the sample is missing or
        the token budget cannot fit ``min_shots`` examples (signalled by
        AssertionError from ``_make_prompt_with_shots``).

        When ``only_calculate_last`` is True, perplexity is restricted to the
        tokens after the last paragraph separator (the query itself).
        """
        if sample is None:
            return None
        try:
            candidates = []
            for candidate in sample.candidates:
                prompt = self.tokenizer.tokenize(
                    sample.fill_prompt_with_candidate(candidate))
                prompt = self._make_prompt_with_shots(
                    prompt, shots_limit=shots_limit,
                    insert_start_token=insert_start_token,
                    min_shots=min_shots,
                    candidate=candidate,
                    seed=sample.seed
                )
                ppl_begin = self._get_query_pos(
                    prompt) if only_calculate_last else 0
                candidates.append(
                    PerplexityCandidate(
                        prompt, content=candidate, ppl_begin=ppl_begin
                    )
                )
            return PerplexityChoiceMakingTask(
                task_id=sample.sample_id,
                candidates=candidates
            )
        except AssertionError:
            # Not enough shots fit the token budget for this sample.
            return None

    def _get_query_pos(self, prompt: np.ndarray) -> int:
        """Return the index of the first token after the last full paragraph
        separator that is followed by at least one token; 0 when no such
        separator exists.

        Bugfix: the single-token-separator path previously returned
        ``last_index + 1`` even when the separator was the final token, making
        ``ppl_begin == len(prompt)`` (an empty perplexity window). Both paths
        now require trailing query text, matching the intent of the original
        multi-token implementation.
        """
        sep = self.tokenizer.paragraph_sep_token
        matches = np.where(prompt == sep[0])[0]
        if len(matches) == 0:
            return 0
        pl = len(prompt)
        sl = len(sep)
        # Walk candidate positions from the end; the last valid separator wins.
        for i in reversed(matches.tolist()):
            if i + sl >= pl:
                # Separator ends at (or past) the end of the prompt:
                # there is no query text after it to score.
                continue
            if sl > 1 and any(
                    prompt[i + j] != sep[j] for j in range(1, sl)):
                # First token matched but the rest of the multi-token
                # separator does not — not a real separator occurrence.
                continue
            return i + sl
        return 0


class PerplexityPromptShotsClassificationTaskMaker(
    PerplexityPromptShotsChoiceMakingTaskMaker,
    metaclass=abc.ABCMeta
):
    """Classification variant of the choice-making task maker.

    Instead of per-sample candidates, every task compares the same fixed set
    of label strings (``label_map``) filled into the sample's prompt.
    """

    def __init__(
            self,
            token_limit: int,
            tokenizer: BaseTokenizer,
            shots_generator: ShotsGenerator,
            label_map: List[str],
            first_prompt: np.ndarray = None
    ):
        """
        :param label_map: ordered list of label texts; one perplexity
            candidate is built per label.
        """
        super().__init__(
            token_limit=token_limit,
            tokenizer=tokenizer,
            shots_generator=shots_generator,
            first_prompt=first_prompt
        )
        self.__label_map = label_map

    @property
    def label_map(self) -> List[str]:
        """The fixed label texts compared for every sample."""
        return self.__label_map

    def _make_task(
            self, sample: PromptClassificationSample,
            insert_start_token: bool,
            only_calculate_last: bool = False,
            shots_limit: int = None,
            min_shots: int = None,
            **kwargs
    ) -> Union[PerplexityChoiceMakingTask, None]:
        """Build one task with a candidate per label, or None when the sample
        is missing or the shot budget cannot be met (AssertionError)."""
        if sample is None:
            return None
        try:
            candidates = [
                self.__candidate_for_label(
                    sample, label, insert_start_token,
                    only_calculate_last, shots_limit, min_shots)
                for label in self.label_map
            ]
            return PerplexityChoiceMakingTask(
                task_id=sample.sample_id,
                candidates=candidates
            )
        except AssertionError:
            return None

    def __candidate_for_label(
            self, sample: PromptClassificationSample, label: str,
            insert_start_token: bool, only_calculate_last: bool,
            shots_limit: int, min_shots: int
    ) -> PerplexityCandidate:
        # Fill the label into the prompt template, prefix shots, and wrap.
        tokens = self.tokenizer.tokenize(
            sample.fill_prompt_with_label_content(label))
        tokens = self._make_prompt_with_shots(
            prompt=tokens,
            shots_limit=shots_limit,
            min_shots=min_shots,
            label=label,
            seed=sample.seed,
            insert_start_token=insert_start_token
        )
        begin = self._get_query_pos(tokens) if only_calculate_last else 0
        return PerplexityCandidate(tokens, content=label, ppl_begin=begin)


class DirectPromptTaskMaker(ShotsPromptTaskMaker, metaclass=abc.ABCMeta):
    """Task maker for plain generation prompts.

    Tokenizes the sample's prompt as-is, prefixes few-shot examples, and wraps
    the result in a DirectPromptTask (no candidate comparison).
    """

    def __init__(
            self,
            token_limit: int,
            tokenizer: BaseTokenizer,
            shots_generator: ShotsGenerator,
            first_prompt: np.ndarray = None
    ):
        super().__init__(
            making_task_type=DirectPromptTask,
            token_limit=token_limit,
            tokenizer=tokenizer,
            shots_generator=shots_generator,
            first_prompt=first_prompt
        )

    def _make_task(
            self, sample: PromptSample,
            insert_start_token: bool,
            shots_limit: int = None,
            min_shots: int = None,
            **kwargs,
    ) -> Union[DirectPromptTask, None]:
        """Build a DirectPromptTask, or None when the sample is missing or the
        shot budget cannot be met (AssertionError from shot assembly)."""
        if sample is None:
            return None
        tokens = self.tokenizer.tokenize(sample.prompt)
        try:
            full_prompt = self._make_prompt_with_shots(
                tokens,
                shots_limit=shots_limit,
                min_shots=min_shots,
                seed=sample.seed,
                insert_start_token=insert_start_token,
                **kwargs
            )
            return DirectPromptTask(
                task_id=sample.sample_id,
                prompt=full_prompt
            )
        except AssertionError:
            return None
