from typing import AnyStr, List, Tuple
from transformers import PreTrainedTokenizer
import pandas as pd, numpy as np
import json, random, re, torch
from torch.utils.data import Dataset

# Maps each Amazon product-review domain name to its 1-based integer domain id
# (stored in the 'domain' field of every review read by read_senti below).
Senti_domain_map = {
  'books':1,
  'dvd':2,
  'kitchen':3,
  'electronics':4
}

def DataSplit(dataset, length=None):
    """Randomly partition ``dataset`` into disjoint subsets of given sizes.

    :param dataset: a dataset object exposing a ``dataset`` DataFrame and a
        ``tokenizer`` attribute, and constructible with no arguments
    :param length: sizes of the subsets to produce; their sum must not exceed
        ``len(dataset)``.  ``None``/empty yields an empty list.
    :return: a list of new dataset objects, one per requested length
    """
    # BUG FIX: the previous default was a mutable ``length=[]`` shared across
    # calls; ``None`` avoids that pitfall while keeping the same behavior.
    if length is None:
        length = []
    # Sampling the full range without replacement is a random permutation.
    idxs = random.sample(range(len(dataset)), len(dataset))
    new_sets = [dataset.__class__() for _ in length]
    start_idx = 0
    for subset, size in zip(new_sets, length):
        subset.dataset = dataset.dataset.iloc[idxs[start_idx:start_idx + size]]
        subset.tokenizer = dataset.tokenizer
        start_idx += size
    return new_sets

def text_to_batch_transformer(text: List, tokenizer: PreTrainedTokenizer, text_pair: List = None):
    """Encode a batch of texts (optionally sentence pairs) for a transformer.

    :param text: the sentences to tokenize and encode
    :param tokenizer: the tokenizer to use
    :param text_pair: optional second sentences (for two-sequence inputs)
    :return: (input_ids, attention_masks, token_type_ids), one list per input
    """
    # Older tokenizers expose ``max_len``; newer ones ``model_max_length``.
    max_len = tokenizer.max_len if hasattr(tokenizer, 'max_len') else tokenizer.model_max_length
    if text_pair is not None:
        assert len(text) == len(text_pair)
        seconds = text_pair
    else:
        seconds = [None] * len(text)
    items = [
        tokenizer.encode_plus(
            first, text_pair=second, add_special_tokens=True, max_length=max_len,
            return_length=False, return_attention_mask=True,
            return_token_type_ids=True)
        for first, second in zip(text, seconds)
    ]
    ids = [item['input_ids'] for item in items]
    masks = [item['attention_mask'] for item in items]
    seg_ids = [item['token_type_ids'] for item in items]
    return ids, masks, seg_ids

def collate_Senti_batch_with_device(device):
    """Build a collate_fn that pads a sentiment batch and puts it on ``device``.

    Each batch item is (input_ids, mask, seg_ids, label, domain, ...), where
    the first three entries are singleton lists wrapping the token lists.
    """
    def collate_batch_transformer(input_data: Tuple):
        input_ids = [item[0][0] for item in input_data]
        masks = [item[1][0] for item in input_data]
        seg_ids = [item[2][0] for item in input_data]
        labels = [item[3] for item in input_data]
        domains = [item[4] for item in input_data]

        max_length = max(len(ids) for ids in input_ids)

        # Right-pad every sequence with zeros up to the batch maximum.
        def pad(seq):
            return seq + [0] * (max_length - len(seq))

        input_ids = [pad(ids) for ids in input_ids]
        masks = [pad(m) for m in masks]
        seg_ids = [pad(s) for s in seg_ids]

        assert all(len(ids) == max_length for ids in input_ids)
        assert all(len(m) == max_length for m in masks)
        assert all(len(s) == max_length for s in seg_ids)

        def to_tensor(data):
            return torch.tensor(data, device=device)

        return (to_tensor(input_ids), to_tensor(masks), to_tensor(seg_ids),
                to_tensor(labels), to_tensor(domains))
    return collate_batch_transformer

def collate_batch_transformer(input_data: Tuple):
    """Pad a batch of (input_ids, mask, label, domain) items into CPU tensors."""
    input_ids = [item[0][0] for item in input_data]
    masks = [item[1][0] for item in input_data]
    labels = [item[2] for item in input_data]
    domains = [item[3] for item in input_data]

    max_length = max(len(ids) for ids in input_ids)

    # Right-pad ids and masks with zeros up to the batch maximum.
    input_ids = [ids + [0] * (max_length - len(ids)) for ids in input_ids]
    masks = [m + [0] * (max_length - len(m)) for m in masks]

    assert all(len(ids) == max_length for ids in input_ids)
    assert all(len(m) == max_length for m in masks)
    return (torch.tensor(input_ids), torch.tensor(masks),
            torch.tensor(labels), torch.tensor(domains))

def collate_batch_transformer_with_index(input_data: Tuple):
    """Collate a batch and append the per-item indices as the last element."""
    indices = [item[-1] for item in input_data]
    return collate_batch_transformer(input_data) + (indices,)

def read_senti(txt_path: AnyStr):
    """Read a label-prefixed sentiment text file into a list of review dicts.

    Each line is expected to look like ``"<label> <sentence>"``.  The domain
    is taken from the second-to-last path component of ``txt_path`` (i.e. the
    directory name) and mapped through ``Senti_domain_map``.

    :param txt_path: a '/'-separated path of the form ``.../<domain>/<file>``
    :return: a list of dicts with keys ``sent``, ``label`` and ``domain``
    """
    reviews = []
    domain_name = txt_path.rsplit('/', 2)[1]
    # Hoist the lookup out of the loop; also fails fast on unknown domains.
    domain_id = Senti_domain_map[domain_name]
    with open(txt_path, encoding='utf8', errors='ignore') as f:
        for line in f:
            parts = line.strip("\n").split(' ', 1)
            if len(parts) < 2:
                # Skip blank/malformed lines instead of raising IndexError.
                continue
            reviews.append({'sent': parts[1],
                            'label': int(parts[0]),
                            'domain': domain_id})
    return reviews

def transIrregularWord(line):
    """Lowercase ``line`` and strip @mentions, #hashtags and URLs.

    :param line: raw text (may be ``None`` or empty)
    :return: the cleaned, lowercased string; '' for falsy input
    """
    if not line:
        return ''
    # BUG FIX: ``line.lower()`` returned a new string that was discarded
    # (strings are immutable); it must be assigned back to take effect.
    line = line.lower()
    line = re.sub(r"@[^ \n\t]*", "", line)        # @mentions
    line = re.sub(r"#[^ \n\t]*", "", line)        # #hashtags
    line = re.sub(r"http(.?)://[^ ]*", "", line)  # http/https URLs
    return line


class ClsDataset(Dataset):
    """
    A minimal classification dataset: an (initially empty) DataFrame of rows
    plus the tokenizer used to encode them.
    """
    def __init__(
            self,
            tokenizer: PreTrainedTokenizer = None,
    ):
        """
        :param tokenizer: the tokenizer to use
        """
        super(ClsDataset, self).__init__()
        self.dataset = pd.DataFrame()
        self.tokenizer = tokenizer

    def set_domain_id(self, domain_id):
        """Override the domain ID for all rows.

        :param domain_id: value written to the entire 'domain' column
        """
        self.dataset['domain'] = domain_id

    def __len__(self):
        # Number of rows currently held in the DataFrame.
        return len(self.dataset)


class SentiDataset(Dataset):
    """
    Implements a dataset for the multidomain sentiment analysis dataset.

    Sentences, labels and domain ids are stored in flat arrays; the
    ``read_indexs`` array selects the currently visible subset (see
    ``domainSelect``), and every public accessor goes through it.
    """
    def __init__(
            self,
            dataset_path: AnyStr = None,
            domains: List = None,
            tokenizer: PreTrainedTokenizer = None,
            domain_ids: List = None,
            max_data_size: int = -1,
            load_data = True
    ):
        """
        :param dataset_path: the data files to load (one per domain)
        :param domains: the set of domains to load data for
        :param tokenizer: the tokenizer to use
        :param domain_ids: a list of ids to override the default domain IDs
        :param max_data_size: if > 0, keep at most this many sampled rows
        :param load_data: whether to read the files immediately
        """
        super(SentiDataset, self).__init__()
        self.tokenizer = tokenizer
        self.dataset_path = dataset_path
        self.max_data_size = max_data_size
        self.domains, self.domain_ids = domains, domain_ids
        self.sents, self.read_indexs = None, None
        self._label, self._confidence, self._entrophy = None, None, None
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.collate_raw_batch = collate_Senti_batch_with_device(device)
        if load_data:
            self.load_data()

    def load_data(self):
        """Read the data files and populate the flat storage arrays."""
        dataset = self.obtainDataset(self.dataset_path, data_size=self.max_data_size)
        if self.domain_ids is not None and self.domains is not None:
            # BUG FIX: the old chained assignment ``dataset[mask][2] = id``
            # wrote into a temporary copy and was a no-op.  Compute all masks
            # up front so one remap cannot affect rows matched by a later one,
            # then write through ``.loc``.
            masks = [dataset['domain'] == Senti_domain_map[domain]
                     for domain in self.domains]
            for mask, new_id in zip(masks, self.domain_ids):
                dataset.loc[mask, 'domain'] = new_id
        self.sents = dataset['sent'].values
        self._label = dataset['label'].values
        self._confidence = torch.ones(len(self._label))
        self._entrophy = torch.zeros(len(self._label))
        self.domain = dataset['domain'].values
        self.read_indexs = np.arange(len(self.domain))

    @property
    def label(self):
        # Labels of the currently visible rows.
        return self._label[self.read_indexs]

    def setLabel(self, label, idxs):
        """Overwrite labels at positions ``idxs`` of the visible view."""
        indexs = self.read_indexs[idxs]
        self._label[indexs] = label

    @property
    def confidence(self):
        # Per-row confidence scores of the currently visible rows.
        return self._confidence[self.read_indexs]

    def setConfidence(self, confidence, idxs):
        """Overwrite confidences at positions ``idxs`` of the visible view."""
        indexs = self.read_indexs[idxs]
        self._confidence[indexs] = confidence

    @property
    def entrophy(self):
        # Per-row entropy values of the currently visible rows.
        return self._entrophy[self.read_indexs]

    def setEntrophy(self, entrophy, idxs):
        """Overwrite entropies at positions ``idxs`` of the visible view."""
        indexs = self.read_indexs[idxs]
        self._entrophy[indexs] = entrophy

    def labelTensor(self, device=None):
        """Return the visible labels as a tensor on ``device``."""
        return torch.tensor(self._label[self.read_indexs], device=device)

    def __len__(self):
        return len(self.read_indexs)

    def domainSelect(self, domain_id):
        """Restrict the visible view to rows of a single domain."""
        if domain_id not in self.domain:
            print(f"Select domain failed, due to the domain_id {domain_id} is missed in data domains")
        read_indexs = np.arange(len(self.domain))
        # Remember the filter so it can be re-applied after row removal.
        self.valid_domain_id = domain_id
        self.read_indexs = read_indexs[self.domain == domain_id]

    def obtainDataset(self, dataset_path, data_size=-1):
        """Read every file in ``dataset_path`` into one shuffled DataFrame.

        :param data_size: if > 0, sample exactly this many rows; otherwise
            shuffle all rows (``frac=1.0``)
        """
        df_list = [pd.DataFrame(read_senti(fname)) for fname in dataset_path]
        big_df = pd.concat(df_list)
        if data_size > 0:
            return big_df.sample(n=data_size)
        return big_df.sample(frac=1.0)

    def __getitem__(self, item) -> Tuple:
        idx = self.read_indexs[item]
        input_ids, mask, seg_ids = text_to_batch_transformer([self.sents[idx]],
                                                             self.tokenizer,
                                                             text_pair=None)
        # Domains are 1-based in Senti_domain_map; shift to 0-based here.
        return input_ids, mask, seg_ids, self._label[idx], \
               self.domain[idx] - 1, item

class SentiDatasetV2(SentiDataset):
    """
    Implements a dataset for the multidomain sentiment analysis dataset.

    Extends ``SentiDataset`` with explicit loading (``load``) and with
    ``Derive``/``Merge`` to move rows between dataset instances.
    """
    def __init__(
            self,
            dataset_path: List = None,
            domains: List = None,
            tokenizer: PreTrainedTokenizer = None,
            domain_ids: List = None,
            max_data_size: int = -1,
            load_data=False
    ):
        """
        :param dataset_path: the data files to load (one per domain)
        :param domains: the set of domains to load data for
        :param tokenizer: the tokenizer to use
        :param domain_ids: a list of ids to override the default domain IDs
        :param max_data_size: if > 0, keep at most this many sampled rows
        :param load_data: whether to read the files immediately (off by default)
        """
        super(SentiDatasetV2, self).__init__(dataset_path, domains, tokenizer,
                                             domain_ids, max_data_size, load_data)

    def load(self):
        """Read the data files and populate the flat storage arrays."""
        dataset = self.obtainDataset(self.dataset_path, data_size=self.max_data_size)
        if self.domain_ids is not None and self.domains is not None:
            # BUG FIX: the old chained assignment ``dataset[mask][2] = id``
            # wrote into a temporary copy and was a no-op; write through
            # ``.loc`` with masks computed before any assignment.
            masks = [dataset['domain'] == Senti_domain_map[domain]
                     for domain in self.domains]
            for mask, new_id in zip(masks, self.domain_ids):
                dataset.loc[mask, 'domain'] = new_id
        self.sents = dataset['sent'].values
        self._label = dataset['label'].values
        self._confidence = torch.ones(len(self._label))
        self._entrophy = torch.zeros(len(self._label))
        self.domain = dataset['domain'].values
        self.read_indexs = np.arange(len(self.domain))

    def Derive(self, idxs: List):
        """Remove the rows at view positions ``idxs`` and return them as a
        new dataset of the same class (which shares tokenizer/config)."""
        new_set = self.__class__(None, self.domains, self.tokenizer, self.domain_ids, -1)
        real_idxs = [self.read_indexs[idx] for idx in idxs]
        new_set.read_indexs = np.arange(len(real_idxs))
        new_set.initLabel(self.label[real_idxs].copy())
        new_set.initConfidence(self.confidence[real_idxs].clone())
        new_set.initEntrophy(self.entrophy[real_idxs].clone())

        new_set.sents = self.sents[real_idxs].copy()
        new_set.domain = self.domain[real_idxs]

        # Drop the moved rows from this dataset's storage and renumber.
        self.sents = np.delete(self.sents, real_idxs)
        self.domain = np.delete(self.domain, real_idxs)
        self._label = np.delete(self._label, real_idxs)
        self._confidence = np.delete(self._confidence, real_idxs)
        self._entrophy = np.delete(self._entrophy, real_idxs)
        self.read_indexs = np.arange(len(self.domain))
        if hasattr(self, 'valid_domain_id'):
            # Re-apply the active domain filter after renumbering.
            self.domainSelect(self.valid_domain_id)
        return new_set

    def Merge(self, another_set: SentiDataset):
        """Append the visible rows of ``another_set`` to this dataset."""
        another_premise = another_set.sents[another_set.read_indexs]
        another_domain = another_set.domain[another_set.read_indexs]
        # BUG FIX: the appended rows occupy storage positions
        # [old_size, old_size + len(another_set)).  The old code computed this
        # range AFTER extending ``_label``, producing out-of-range indices.
        old_size = len(self._label)
        self.sents = np.concatenate([self.sents, another_premise])
        self.domain = np.concatenate([self.domain, another_domain])
        self._label = np.concatenate([self._label, another_set.label])
        self._confidence = np.concatenate([self._confidence, another_set.confidence])
        self._entrophy = np.concatenate([self._entrophy, another_set.entrophy])
        extend_idxs = np.arange(old_size, old_size + len(another_set))
        self.read_indexs = np.concatenate([self.read_indexs, extend_idxs])

    def initLabel(self, label):
        """Replace label storage; length must match the visible view."""
        assert len(self.read_indexs) == len(label)
        self._label = label

    def initConfidence(self, confidence):
        """Replace confidence storage; length must match the visible view."""
        assert len(self.read_indexs) == len(confidence)
        self._confidence = confidence

    def initEntrophy(self, entrophy):
        """Replace entropy storage; length must match the visible view."""
        assert len(self.read_indexs) == len(entrophy)
        self._entrophy = entrophy
