from os.path import join, abspath
from typing import List
import json

from backend.experiment.framework.shots_prompt.task import \
    DirectPromptTaskMaker
from backend.experiment.framework.data import PromptSample
from backend.experiment.framework.task import TaskMaker
from backend.experiment.tokenizer import tokenizer
from backend.experiment.framework.shots_prompt.implement import \
    CommonGenerationImplement, \
    CommonShotsGenerator
from backend.experiment.model import context_limit


class CMRC2018Implement(CommonGenerationImplement):
    """CMRC 2018 (Chinese machine reading comprehension) prompt implement.

    Builds generation prompts of the form
    ``阅读文本：“<context>”问“<question>”答：<answer>`` from the SQuAD-style
    CMRC 2018 JSON files (``train.json`` / ``dev.json`` / ``test.json``)
    located in :meth:`data_dir`.
    """

    # Cached longest answer length in characters; -1 means "not yet computed".
    __max_generate_count = -1
    # Placeholder substituted for the answer when targets are masked.
    __mask_token = '[MASK]'

    @classmethod
    def max_generate_count(cls) -> int:
        """Return the character length of the longest reference answer in
        the training split, computed lazily and cached on the class."""
        if cls.__max_generate_count == -1:
            # default=-1 keeps the sentinel (and thus lazy recomputation)
            # when the training data contains no answers at all.
            cls.__max_generate_count = max(
                (
                    len(answer['text'])
                    for record in cls._read_train_records()
                    for paragraph in record['paragraphs']
                    for qa in paragraph['qas']
                    for answer in qa['answers']
                ),
                default=-1,
            )
            print('problem max generate count:', cls.__max_generate_count)
        return cls.__max_generate_count

    @classmethod
    def _make_test_sample(cls, record, made_sample_num: int) -> \
            List[PromptSample]:
        """Build one answer-less prompt sample per question in *record*.

        ``made_sample_num`` is the id assigned to the first sample; ids
        increase by one per question across all paragraphs of the record.
        """
        res = []
        for paragraph in record['paragraphs']:
            context = paragraph['context']
            for qa in paragraph['qas']:
                complete = cls.__make_single_prompt(
                    context, qa['question'], '')
                # Sanity bound on prompt length in characters (not tokens).
                # NOTE(review): assert is stripped under ``python -O``;
                # consider raising explicitly if this guard must always run.
                assert len(complete) < 1024, f'prompt: {complete}'
                res.append(
                    PromptSample(
                        prompt=complete,
                        sample_id=made_sample_num
                    )
                )
                made_sample_num += 1
        return res

    @classmethod
    def _read_split(cls, split: str) -> list:
        """Load ``<split>.json`` from :meth:`data_dir` and return its
        SQuAD-style top-level ``data`` list."""
        with open(join(cls.data_dir(), f'{split}.json'), 'r',
                  encoding='utf8') as file:
            return json.load(file)['data']

    @classmethod
    def _read_train_records(cls) -> list:
        """Return the records of the training split."""
        return cls._read_split('train')

    @classmethod
    def _read_dev_records(cls) -> list:
        """Return the records of the dev split."""
        return cls._read_split('dev')

    @classmethod
    def _read_test_records(cls) -> list:
        """Return the records of the test split."""
        return cls._read_split('test')

    @classmethod
    def _make_prompt(cls, record, mask_target: bool) -> List[str]:
        """Build one complete prompt per question in *record*.

        When ``mask_target`` is true, the answer slot is filled with the
        mask token; otherwise the first reference answer is used.
        """
        res = []
        for paragraph in record['paragraphs']:
            context = paragraph['context']
            for qa in paragraph['qas']:
                if mask_target:
                    answer = cls.__mask_token
                else:
                    answer = qa['answers'][0]['text']
                res.append(
                    cls.__make_single_prompt(context, qa['question'], answer)
                )
        return res

    def get_task_maker(self) -> TaskMaker:
        """Return a direct-prompt task maker wired with the shared tokenizer,
        the model context limit, and a length-sorted shots generator."""
        return DirectPromptTaskMaker(
            token_limit=context_limit,
            tokenizer=tokenizer,
            shots_generator=CommonShotsGenerator(
                length_sorted_example_tokens=self.get_sorted_example_tokens()
            )
        )

    @classmethod
    def data_dir(cls) -> str:
        """Directory holding the CMRC 2018 JSON files (two directory levels
        above this module's file)."""
        return abspath(join(__file__, '../..'))

    @classmethod
    def cache_dir(cls) -> str:
        """Cache directory nested under :meth:`work_dir`."""
        return join(cls.work_dir(), 'cache')

    @classmethod
    def work_dir(cls) -> str:
        """Directory containing this module's file."""
        return abspath(join(__file__, '..'))

    @classmethod
    def __make_single_prompt(cls, context: str, question: str,
                             answer: str) -> str:
        """Render one reading-comprehension prompt; the Chinese template
        reads "read text: ... question: ... answer: ..."."""
        return f'阅读文本：“{context}”问“{question}”答：{answer}'
