from typing import List
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.dataset import T_co
import torch
from utils.static_object import SentenceEvidence, SentenceEvidence_doc


class RationaleTrainDataset(Dataset):
    """Training dataset that pairs a query with a candidate rationale.

    Each item is built as ``query <sep> rationale`` (optionally wrapped in
    doubled marker tokens and/or followed by ``<sep> context``), tokenized,
    and returned as ``(input_ids, attention_mask, ra_label)``.
    """

    def __init__(self,
                 instance_list: List["SentenceEvidence"],
                 tokenizer,
                 max_input_length: int,
                 use_context: bool,
                 use_marker: bool
                 ):
        """Store instances and tokenization configuration.

        :param instance_list: evidence instances to iterate over.
        :param tokenizer: HuggingFace-style tokenizer exposing ``sep_token``
            and ``encode_plus`` — assumed interface, confirm with callers.
        :param max_input_length: truncation length passed to the tokenizer.
        :param use_context: if True, append the instance's context (when
            present) after a separator token.
        :param use_marker: if True, wrap query/rationale in marker tokens.
        """
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length
        self.context = use_context
        self.marker = use_marker

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index):
        """Tokenize instance ``index`` and return (ids, mask, ra_label)."""
        sent_evi = self.instance_list[index]
        sep_token = self.tokenizer.sep_token

        if self.marker:
            # Doubled markers delimit each segment; '[\\query]' etc. are the
            # literal closing tags this project uses.
            query_str = '[query]' * 2 + sent_evi.query + '[\\query]' * 2
            ra_str = '[answer]' * 2 + sent_evi.rationale + '[\\answer]' * 2
            input_str = query_str + sep_token + ra_str
        else:
            input_str = sent_evi.query + sep_token + sent_evi.rationale

        # Context is optional on the instance even when use_context is set.
        if self.context and sent_evi.context is not None:
            input_str += sep_token + sent_evi.context

        inputs = self.tokenizer.encode_plus(text=input_str,
                                            add_special_tokens=True,
                                            return_token_type_ids=False,
                                            return_attention_mask=True,
                                            return_tensors='pt',
                                            max_length=self.max_input_length,
                                            truncation=True)

        # encode_plus returns batch-shaped (1, L) tensors; drop the batch dim.
        input_ids = inputs['input_ids'].squeeze(0)
        attention_mask = inputs['attention_mask'].squeeze(0)
        return input_ids, attention_mask, sent_evi.ra_label


class RationaleTestDataset(Dataset):
    """Evaluation dataset over document-level evidence instances.

    Builds the same ``query <sep> rationale [<sep> context]`` input as the
    training dataset, but additionally returns the raw query, classification
    label, document id, and rationale text so predictions can be grouped and
    scored per document.
    """

    def __init__(self,
                 instance_list: List["SentenceEvidence_doc"],
                 tokenizer,
                 max_input_length: int,
                 use_context: bool,
                 use_marker: bool
                 ):
        """Store instances and tokenization configuration.

        :param instance_list: document-level evidence instances.
        :param tokenizer: HuggingFace-style tokenizer exposing ``sep_token``
            and ``encode_plus`` — assumed interface, confirm with callers.
        :param max_input_length: truncation length passed to the tokenizer.
        :param use_context: if True, append the instance's context (when
            present) after a separator token.
        :param use_marker: if True, wrap query/rationale in marker tokens.
        """
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length
        self.context = use_context
        self.marker = use_marker

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index):
        """Return (ids, mask, ra_label, query, cls_label, doc_id, rationale)."""
        sent_evi = self.instance_list[index]
        sep_token = self.tokenizer.sep_token

        if self.marker:
            # Same doubled-marker scheme as RationaleTrainDataset.
            query_str = '[query]' * 2 + sent_evi.query + '[\\query]' * 2
            ra_str = '[answer]' * 2 + sent_evi.rationale + '[\\answer]' * 2
            input_str = query_str + sep_token + ra_str
        else:
            input_str = sent_evi.query + sep_token + sent_evi.rationale

        # Context is optional on the instance even when use_context is set.
        if self.context and sent_evi.context is not None:
            input_str += sep_token + sent_evi.context

        inputs = self.tokenizer.encode_plus(text=input_str,
                                            add_special_tokens=True,
                                            return_token_type_ids=False,
                                            return_attention_mask=True,
                                            return_tensors='pt',
                                            max_length=self.max_input_length,
                                            truncation=True)

        # encode_plus returns batch-shaped (1, L) tensors; drop the batch dim.
        input_ids = inputs['input_ids'].squeeze(0)
        attention_mask = inputs['attention_mask'].squeeze(0)
        return input_ids, attention_mask, sent_evi.ra_label, sent_evi.query, \
               sent_evi.cls_label, sent_evi.doc_id, sent_evi.rationale


class ClassificationDataset(Dataset):
    """Dataset for the claim-classification stage.

    Builds ``query <sep> rationale`` (marker tokens use ``[rationale]`` here,
    not ``[answer]``), optionally followed by ``<sep>`` plus the
    space-joined context sentences, and returns
    ``(input_ids, attention_mask, cls_label)``.
    """

    def __init__(self,
                 instance_list: List["SentenceEvidence"],
                 tokenizer,
                 max_input_length: int,
                 use_context: bool,
                 use_marker: bool
                 ):
        """Store instances and tokenization configuration.

        :param instance_list: evidence instances to iterate over.
        :param tokenizer: HuggingFace-style tokenizer exposing ``sep_token``
            and ``encode_plus`` — assumed interface, confirm with callers.
        :param max_input_length: truncation length passed to the tokenizer.
        :param use_context: if True, append the space-joined context
            sentences (a list on the instance) after a separator token.
        :param use_marker: if True, wrap query/rationale in marker tokens.
        """
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length
        self.context = use_context
        self.marker = use_marker

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index):
        """Tokenize instance ``index`` and return (ids, mask, cls_label)."""
        sent_evi = self.instance_list[index]
        sep_token = self.tokenizer.sep_token

        if self.marker:
            # Classification uses [rationale] markers, unlike the rationale
            # datasets which use [answer].
            query_str = '[query]' * 2 + sent_evi.query + '[\\query]' * 2
            ra_str = '[rationale]' * 2 + sent_evi.rationale + '[\\rationale]' * 2
            input_str = query_str + sep_token + ra_str
        else:
            input_str = sent_evi.query + sep_token + sent_evi.rationale

        # Here context is a list of sentences; skip when absent or empty.
        if self.context and isinstance(sent_evi.context, list) and sent_evi.context:
            input_str += sep_token + ' '.join(sent_evi.context)

        inputs = self.tokenizer.encode_plus(text=input_str,
                                            add_special_tokens=True,
                                            return_token_type_ids=False,
                                            return_attention_mask=True,
                                            return_tensors='pt',
                                            max_length=self.max_input_length,
                                            truncation=True)

        # encode_plus returns batch-shaped (1, L) tensors; drop the batch dim.
        input_ids = inputs['input_ids'].squeeze(0)
        attention_mask = inputs['attention_mask'].squeeze(0)
        return input_ids, attention_mask, sent_evi.cls_label


def collate_fn(batch):
    """Collate ``(input_ids, attention_mask, label)`` triples into a batch.

    Sequences are right-padded with zeros to the longest example in the
    batch; labels are stacked into a single tensor.
    """
    ids, masks, labels = zip(*batch)
    padded_ids = pad_sequence(list(ids), batch_first=True)
    padded_masks = pad_sequence(list(masks), batch_first=True)
    return padded_ids, padded_masks, torch.tensor(labels)


def collate_fn_joint(batch):
    """Collate test-time items carrying tensors plus per-item metadata.

    The two tensor fields are zero-padded to the longest sequence and the
    rationale labels become one tensor; the remaining fields (query,
    cls_label, doc_id, rationale) are passed through as tuples.
    """
    ids, masks, ra_labels, queries, cls_labels, doc_ids, rationales = zip(*batch)
    padded_ids = pad_sequence(list(ids), batch_first=True)
    padded_masks = pad_sequence(list(masks), batch_first=True)
    return (padded_ids, padded_masks, torch.tensor(ra_labels),
            queries, cls_labels, doc_ids, rationales)
