import json
from os.path import abspath, join
from typing import List, Union

from backend.experiment.framework.shots_prompt.implement import \
    CommonPerplexityClassificationImplement, \
    CommonShotsGenerator, \
    CommonPromptClassificationSample
from backend.experiment.framework.shots_prompt.task import \
    PerplexityPromptShotsClassificationTaskMaker
from backend.experiment.framework.task import TaskMaker
from backend.experiment.model import context_limit
from backend.experiment.tokenizer import tokenizer


class CMNLI2020Implement(CommonPerplexityClassificationImplement):
    """CMNLI-2020 natural-language-inference task implementation.

    Maps NLI records (sentence pairs labelled ``neutral`` / ``entailment``
    / ``contradiction``) onto Chinese cloze-style prompts whose masked
    relation word is scored by perplexity-based classification.
    """

    __mask_token = '[MASK]'
    # Single source of truth for dataset label -> class index.  Indices
    # match the positions in label_map(); keeps _get_record_label and
    # _make_prompt consistent instead of duplicating the mapping.
    __label_index = {'neutral': 0, 'entailment': 1, 'contradiction': 2}

    @classmethod
    def label_map(cls) -> List[str]:
        """Return the Chinese surface form for each class index."""
        return ['无关的', '相关的', '矛盾的']

    @classmethod
    def _get_record_label(cls, record) -> int:
        """Return the class index (0/1/2) for *record*.

        Raises:
            NotImplementedError: if the record carries an unknown label
                (same exception type as before, for existing callers).
        """
        try:
            return cls.__label_index[record['label']]
        except KeyError:
            raise NotImplementedError

    @classmethod
    def _make_test_sample(cls, record, made_sample_num: int) -> \
            Union[CommonPromptClassificationSample,
                  List[CommonPromptClassificationSample]]:
        """Build a masked-prompt sample for one test record.

        Uses the record's own ``id`` when present, otherwise falls back
        to the running sample counter *made_sample_num*.
        """
        return CommonPromptClassificationSample(
            prompt=cls._make_prompt(record, True),
            target_mask_token=cls.__mask_token,
            sample_id=record.get('id', made_sample_num)
        )

    @classmethod
    def _read_records(cls, filename: str) -> list:
        """Read one JSON-lines file from the data directory."""
        with open(join(cls.data_dir(), filename), 'r', encoding='utf8') \
                as file:
            # One JSON object per line (JSON-lines format).
            return [json.loads(line) for line in file]

    @classmethod
    def _read_train_records(cls) -> list:
        """Load the training split."""
        return cls._read_records('train.json')

    @classmethod
    def _read_dev_records(cls) -> list:
        """Load the development split."""
        return cls._read_records('dev.json')

    @classmethod
    def _read_test_records(cls) -> list:
        """Load the test split."""
        return cls._read_records('test.json')

    @classmethod
    def _make_prompt(cls, record, mask_target: bool) -> Union[str, List[str]]:
        """Render the cloze prompt for *record*.

        When *mask_target* is true the relation word is replaced by the
        mask token; otherwise the gold label's surface form is inserted.
        Unknown labels fall back to index 0 ('无关的'), matching the
        original chained-conditional behaviour.
        """
        s1 = record['sentence1']
        s2 = record['sentence2']
        if mask_target:
            label = cls.__mask_token
        else:
            label = cls.label_map()[
                cls.__label_index.get(record['label'], 0)]
        return f'句子：“{s1}”和句子“{s2}”是{label}'

    def get_task_maker(self) -> TaskMaker:
        """Assemble the few-shot perplexity classification task maker."""
        return PerplexityPromptShotsClassificationTaskMaker(
            token_limit=context_limit,
            tokenizer=tokenizer,
            shots_generator=CommonShotsGenerator(
                length_sorted_example_tokens=self.get_sorted_example_tokens()
            ),
            label_map=self.label_map()
        )

    @classmethod
    def data_dir(cls) -> str:
        """Dataset directory: two levels above this source file."""
        return abspath(join(__file__, '../..'))

    @classmethod
    def cache_dir(cls) -> str:
        """Cache directory inside the working directory."""
        return join(cls.work_dir(), 'cache')

    @classmethod
    def work_dir(cls) -> str:
        """Working directory: the directory containing this source file."""
        return abspath(join(__file__, '..'))
