from typing import List
from torch.utils.data import Dataset
from torch.utils.data.dataset import T_co
from utils.static_object import SentenceEvidence, SentenceEvidence_doc


class BaseTrainRationaleDataset(Dataset):
    """Training dataset for rationale selection.

    Each item encodes ``query + sep_token + rationale`` with the supplied
    tokenizer (presumably a HuggingFace-style tokenizer — it must expose
    ``sep_token`` and ``encode_plus``) and yields a
    ``(input_ids, attention_mask, ra_label)`` triple, where the two tensors
    are 1-D (the batch axis from ``return_tensors='pt'`` is squeezed away).
    """

    def __init__(self,
                 instance_list: List["SentenceEvidence"],
                 tokenizer,
                 max_input_length: int,
                 ):
        # instance_list: evidence records carrying .query/.rationale/.ra_label
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        # Hard cap on token count; longer inputs are truncated at encode time.
        self.max_input_length = max_input_length

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index) -> T_co:
        example = self.instance_list[index]

        # Input form: query [SEP] rationale (special tokens added by tokenizer).
        text = example.query + self.tokenizer.sep_token + example.rationale
        encoded = self.tokenizer.encode_plus(
            text=text,
            add_special_tokens=True,
            return_token_type_ids=False,
            return_attention_mask=True,
            return_tensors='pt',
            max_length=self.max_input_length,
            truncation=True,
        )

        # Drop the leading batch dimension produced by return_tensors='pt'.
        token_ids = encoded['input_ids'].squeeze(0)
        mask = encoded['attention_mask'].squeeze(0)
        return token_ids, mask, example.ra_label


class BaseTestRationaleDataset(Dataset):
    """Evaluation dataset for rationale selection over document-level evidence.

    Encodes ``query + sep_token + rationale`` exactly like the training
    dataset, but each item additionally carries the raw fields needed for
    per-document evaluation: the query string, classification label,
    document id, and rationale text.
    """

    def __init__(self,
                 instance_list: List["SentenceEvidence_doc"],
                 tokenizer,
                 max_input_length: int,
                 ):
        # instance_list: document-level evidence records with
        # .query/.rationale/.ra_label/.cls_label/.doc_id
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        # Hard cap on token count; longer inputs are truncated at encode time.
        self.max_input_length = max_input_length

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index) -> T_co:
        example = self.instance_list[index]

        # Input form: query [SEP] rationale (special tokens added by tokenizer).
        text = example.query + self.tokenizer.sep_token + example.rationale
        encoded = self.tokenizer.encode_plus(
            text=text,
            add_special_tokens=True,
            return_token_type_ids=False,
            return_attention_mask=True,
            return_tensors='pt',
            max_length=self.max_input_length,
            truncation=True,
        )

        # Drop the leading batch dimension produced by return_tensors='pt'.
        token_ids = encoded['input_ids'].squeeze(0)
        mask = encoded['attention_mask'].squeeze(0)
        return (
            token_ids,
            mask,
            example.ra_label,
            example.query,
            example.cls_label,
            example.doc_id,
            example.rationale,
        )


class BaseClassificationDataset(Dataset):
    """Dataset for the claim-classification stage.

    Identical encoding to the rationale datasets — ``query + sep_token +
    rationale`` run through the tokenizer with truncation at
    ``max_input_length`` — but the target returned with each item is the
    classification label (``cls_label``) rather than the rationale label.
    """

    def __init__(self,
                 instance_list: List["SentenceEvidence"],
                 tokenizer,
                 max_input_length: int,
                 ):
        # instance_list: evidence records carrying .query/.rationale/.cls_label
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        # Hard cap on token count; longer inputs are truncated at encode time.
        self.max_input_length = max_input_length

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index) -> T_co:
        example = self.instance_list[index]

        # input form: [CLS] + query + [SEP] + rationale
        text = example.query + self.tokenizer.sep_token + example.rationale
        encoded = self.tokenizer.encode_plus(
            text=text,
            add_special_tokens=True,
            return_token_type_ids=False,
            return_attention_mask=True,
            return_tensors='pt',
            max_length=self.max_input_length,
            truncation=True,
        )

        # Drop the leading batch dimension produced by return_tensors='pt'.
        token_ids = encoded['input_ids'].squeeze(0)
        mask = encoded['attention_mask'].squeeze(0)
        return token_ids, mask, example.cls_label