from typing import List
from torch.utils.data import Dataset
from collections import namedtuple
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.dataset import T_co
import torch
from utils.static_object import SentenceEvidence, SentenceEvidence_doc


class RationaleDataset(Dataset):
    """Dataset that encodes rationale-identification instances.

    Each instance is serialized as
        [CLS] query [SEP] rationale [SEP] cls_label [SEP] context
    and tokenized; the rationale label (``ra_label``) is the training target.
    """

    def __init__(self,
                 instance_list: List[SentenceEvidence],
                 tokenizer,
                 max_input_length: int,
                 ):
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index) -> T_co:
        instance = self.instance_list[index]
        sep = self.tokenizer.sep_token

        # A missing context is represented by the empty string.
        context = instance.context if instance.context is not None else ''

        # input form: [CLS] + query + [SEP] + rationale + [SEP] + cls_label + [SEP] + context
        text = sep.join([instance.query, instance.rationale, instance.cls_label, context])
        encoded = self.tokenizer.encode_plus(text=text,
                                             add_special_tokens=True,
                                             return_token_type_ids=False,
                                             return_attention_mask=True,
                                             return_tensors='pt',
                                             max_length=self.max_input_length,
                                             truncation=True)

        # return_tensors='pt' yields (1, L) tensors; drop the batch dim.
        return (encoded['input_ids'].squeeze(0),
                encoded['attention_mask'].squeeze(0),
                instance.ra_label)


def collate_fn(batch):
    """Collate ``(input_ids, attention_mask, label)`` tuples into a batch.

    Token-id and attention-mask tensors are right-padded with zeros to the
    batch maximum length; labels are stacked into a single tensor.
    """
    ids, masks, labels = zip(*batch)
    padded_ids = pad_sequence(list(ids), batch_first=True)
    padded_masks = pad_sequence(list(masks), batch_first=True)
    return padded_ids, padded_masks, torch.tensor(labels)


class ClassificationDataset(Dataset):
    """Dataset that encodes answer-classification instances.

    Each instance is serialized as
        [CLS] query [SEP] rationale [SEP][SEP] context
    (the doubled separator before the context is intentional and kept for
    compatibility with models trained on this layout); the classification
    label (``cls_label``) is the training target.
    """

    def __init__(self,
                 instance_list: List[SentenceEvidence],
                 tokenizer,
                 max_input_length: int,
                 ):
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index) -> T_co:
        sent_evi = self.instance_list[index]
        sep_token = self.tokenizer.sep_token

        if sent_evi.context is None:
            context = ''
        else:
            # Here sent_evi.context is a list of sentence strings.
            context = ' '.join(sent_evi.context)

        # input form: [CLS] + query + [SEP] + rationale + [SEP] + [SEP] + context
        input_str = sent_evi.query + sep_token + sent_evi.rationale + sep_token + \
                    sep_token + context
        inputs = self.tokenizer.encode_plus(text=input_str,
                                            add_special_tokens=True,
                                            return_token_type_ids=False,
                                            return_attention_mask=True,
                                            return_tensors='pt',
                                            max_length=self.max_input_length,
                                            truncation=True)

        # return_tensors='pt' yields (1, L) tensors; drop the batch dim.
        # A leftover debug hook (printing when the mask had shape (2, 1136))
        # was removed: after squeeze(0) the mask is 1-D, so it was dead code.
        input_ids = inputs['input_ids'].squeeze(0)
        attention_mask = inputs['attention_mask'].squeeze(0)
        return input_ids, attention_mask, sent_evi.cls_label


class RationaleDataset_ide2cls(Dataset):
    """Rationale dataset for the identification-to-classification pipeline.

    Each instance is serialized as
        [CLS] query [SEP] rationale [SEP] cls_label [SEP] context
    and tokenized. Besides the encoded tensors and the rationale label, the
    raw query/cls_label/doc_id/rationale fields are returned so downstream
    classification can be keyed back to the source document.
    """

    def __init__(self,
                 instance_list: List[SentenceEvidence_doc],
                 tokenizer,
                 max_input_length: int,
                 ):
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index) -> T_co:
        instance = self.instance_list[index]
        sep = self.tokenizer.sep_token

        # A missing context is represented by the empty string.
        context = instance.context if instance.context is not None else ''

        # input form: [CLS] + query + [SEP] + rationale + [SEP] + cls_label + [SEP] + context
        text = sep.join([instance.query, instance.rationale, instance.cls_label, context])
        encoded = self.tokenizer.encode_plus(text=text,
                                             add_special_tokens=True,
                                             return_token_type_ids=False,
                                             return_attention_mask=True,
                                             return_tensors='pt',
                                             max_length=self.max_input_length,
                                             truncation=True)

        # return_tensors='pt' yields (1, L) tensors; drop the batch dim.
        return (encoded['input_ids'].squeeze(0),
                encoded['attention_mask'].squeeze(0),
                instance.ra_label,
                instance.query,
                instance.cls_label,
                instance.doc_id,
                instance.rationale)


def collate_fn_ide2cls(batch):
    """Collate for :class:`RationaleDataset_ide2cls` items.

    Right-pads token-id and attention-mask tensors with zeros, stacks the
    rationale labels into a tensor, and passes the remaining raw fields
    (query, cls_label, doc_id, rationale) through untouched as tuples.
    """
    ids, masks, ra_labels, queries, cls_labels, doc_ids, rationales = zip(*batch)
    padded_ids = pad_sequence(list(ids), batch_first=True)
    padded_masks = pad_sequence(list(masks), batch_first=True)
    return (padded_ids, padded_masks, torch.tensor(ra_labels),
            queries, cls_labels, doc_ids, rationales)

class MarkerRationaleDataset(Dataset):
    """Rationale dataset that brackets fields with special marker tokens.

    The query is wrapped in doubled '[query]'/'[\\query]' markers, the
    rationale in doubled '[answer]'/'[\\answer]' markers, and the class
    label in single '[class]'/'[\\class]' markers, then serialized as
        [CLS] query [SEP] rationale [SEP] cls_label [SEP] context
    with ``ra_label`` as the target.
    """

    def __init__(self,
                 instance_list: List[SentenceEvidence],
                 tokenizer,
                 max_input_length: int,
                 ):
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index) -> T_co:
        instance = self.instance_list[index]
        sep = self.tokenizer.sep_token

        # A missing context is represented by the empty string.
        context = instance.context if instance.context is not None else ''

        marked_label = '[class]' + instance.cls_label + '[\\class]'
        marked_query = '[query][query]' + instance.query + '[\\query][\\query]'
        marked_rationale = '[answer][answer]' + instance.rationale + '[\\answer][\\answer]'

        text = sep.join([marked_query, marked_rationale, marked_label, context])
        encoded = self.tokenizer.encode_plus(text=text,
                                             add_special_tokens=True,
                                             return_token_type_ids=False,
                                             return_attention_mask=True,
                                             return_tensors='pt',
                                             max_length=self.max_input_length,
                                             truncation=True)

        # return_tensors='pt' yields (1, L) tensors; drop the batch dim.
        return (encoded['input_ids'].squeeze(0),
                encoded['attention_mask'].squeeze(0),
                instance.ra_label)

class MarkedAllClassificationDataset(Dataset):
    """Classification dataset that marks every query/rationale token.

    Each purely-alphabetic token of the query becomes '[query]tok[\\query]'
    and each purely-alphabetic token of the rationale becomes
    '[rationale]tok[\\rationale]'; tokens containing any non-alphabetic
    character are dropped entirely (this preserves the original filter).
    The item is serialized as
        [CLS] marked_query [SEP] marked_rationale [SEP][SEP] context
    with ``cls_label`` as the target.
    """

    def __init__(self,
                 instance_list: List[SentenceEvidence],
                 tokenizer,
                 max_input_length: int,
                 ):
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index) -> T_co:
        sent_evi = self.instance_list[index]
        sep_token = self.tokenizer.sep_token

        if sent_evi.context is None:
            context = ''
        else:
            # Here sent_evi.context is a list of sentence strings.
            context = ' '.join(sent_evi.context)

        # Build marker-wrapped strings with a single join instead of the
        # former repeated '+=' concatenation (quadratic in token count).
        query_str = ''.join('[query]' + tok + '[\\query]'
                            for tok in sent_evi.query.split() if tok.isalpha())
        ra_str = ''.join('[rationale]' + tok + '[\\rationale]'
                         for tok in sent_evi.rationale.split() if tok.isalpha())

        # input form: [CLS] + query + [SEP] + rationale + [SEP] + [SEP] + context
        input_str = query_str + sep_token + ra_str + sep_token + \
                    sep_token + context
        inputs = self.tokenizer.encode_plus(text=input_str,
                                            add_special_tokens=True,
                                            return_token_type_ids=False,
                                            return_attention_mask=True,
                                            return_tensors='pt',
                                            max_length=self.max_input_length,
                                            truncation=True)

        # return_tensors='pt' yields (1, L) tensors; drop the batch dim.
        input_ids = inputs['input_ids'].squeeze(0)
        attention_mask = inputs['attention_mask'].squeeze(0)
        return input_ids, attention_mask, sent_evi.cls_label

class MarkerRationaleDataset_ide2cls(Dataset):
    """Marker-token rationale dataset for the ide2cls pipeline.

    The query is wrapped in doubled '[query]'/'[\\query]' markers, the
    rationale in doubled '[answer]'/'[\\answer]' markers, and the class
    label in single '[class]'/'[\\class]' markers, then serialized as
        [CLS] query [SEP] rationale [SEP] cls_label [SEP] context
    Besides the encoded tensors and ``ra_label``, the raw
    query/cls_label/doc_id/rationale fields are returned for downstream use.
    """

    def __init__(self,
                 instance_list: List[SentenceEvidence_doc],
                 tokenizer,
                 max_input_length: int,
                 ):
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index) -> T_co:
        instance = self.instance_list[index]
        sep = self.tokenizer.sep_token

        # A missing context is represented by the empty string.
        context = instance.context if instance.context is not None else ''

        marked_label = '[class]' + instance.cls_label + '[\\class]'
        marked_query = '[query][query]' + instance.query + '[\\query][\\query]'
        marked_rationale = '[answer][answer]' + instance.rationale + '[\\answer][\\answer]'

        text = sep.join([marked_query, marked_rationale, marked_label, context])
        encoded = self.tokenizer.encode_plus(text=text,
                                             add_special_tokens=True,
                                             return_token_type_ids=False,
                                             return_attention_mask=True,
                                             return_tensors='pt',
                                             max_length=self.max_input_length,
                                             truncation=True)

        # return_tensors='pt' yields (1, L) tensors; drop the batch dim.
        return (encoded['input_ids'].squeeze(0),
                encoded['attention_mask'].squeeze(0),
                instance.ra_label,
                instance.query,
                instance.cls_label,
                instance.doc_id,
                instance.rationale)

class MarkerClassificationDataset1(Dataset):
    """Classification dataset that brackets query and rationale with
    doubled marker tokens.

    The query is wrapped in doubled '[query]'/'[\\query]' markers and the
    rationale in doubled '[rationale]'/'[\\rationale]' markers, then
    serialized as
        [CLS] query [SEP] rationale [SEP][SEP] context
    (doubled separator before the context), with ``cls_label`` as target.
    """

    def __init__(self,
                 instance_list: List[SentenceEvidence],
                 tokenizer,
                 max_input_length: int,
                 ):
        self.instance_list = instance_list
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length

    def __len__(self) -> int:
        return len(self.instance_list)

    def __getitem__(self, index) -> T_co:
        instance = self.instance_list[index]
        sep = self.tokenizer.sep_token

        if instance.context is None:
            context = ''
        else:
            # Here instance.context is a list of sentence strings.
            context = ' '.join(instance.context)

        marked_query = '[query][query]' + instance.query + '[\\query][\\query]'
        marked_rationale = '[rationale][rationale]' + instance.rationale + '[\\rationale][\\rationale]'

        # Doubled [SEP] before the context mirrors the plain classification
        # dataset's input layout.
        text = marked_query + sep + marked_rationale + sep + sep + context
        encoded = self.tokenizer.encode_plus(text=text,
                                             add_special_tokens=True,
                                             return_token_type_ids=False,
                                             return_attention_mask=True,
                                             return_tensors='pt',
                                             max_length=self.max_input_length,
                                             truncation=True)

        # return_tensors='pt' yields (1, L) tensors; drop the batch dim.
        return (encoded['input_ids'].squeeze(0),
                encoded['attention_mask'].squeeze(0),
                instance.cls_label)