from typing import AnyStr, List, Tuple
from transformers import PreTrainedTokenizer
import unicodedata
import pandas as pd
import json
import glob
import random
import time
import re
import numpy as np
import torch
from torch.utils.data import Dataset
import os

# Map from MultiNLI genre name to a 1-based integer domain id.
# (0 is used elsewhere in this module for examples without a genre field.)
_NLI_GENRES = (
    'government',
    'slate',
    'telephone',
    'travel',
    'fiction',
    'letters',
    'nineeleven',
    'oup',
    'verbatim',
    'facetoface',
)
NLI_domain_map = {genre: idx for idx, genre in enumerate(_NLI_GENRES, start=1)}

def DataSplit(dataset, length=None):
    """Randomly split *dataset* into disjoint subsets of the given sizes.

    :param dataset: a dataset object exposing ``.dataset`` (a DataFrame),
        ``.tokenizer`` and ``__len__``; its class must be constructible with
        no arguments
    :param length: sizes of the subsets to produce; their sum must not exceed
        ``len(dataset)`` (rows beyond the sum are dropped)
    :return: a list of new dataset objects, one per entry in *length*
    """
    # Bug fix: the original used a mutable default argument (`length=[]`).
    sizes = [] if length is None else length
    # One random permutation of all row positions; consecutive slices of it
    # are therefore disjoint.
    perm = random.sample(range(len(dataset)), len(dataset))
    subsets = [dataset.__class__() for _ in sizes]
    offset = 0
    for subset, size in zip(subsets, sizes):
        subset.dataset = dataset.dataset.iloc[perm[offset:offset + size]]
        subset.tokenizer = dataset.tokenizer
        offset += size
    return subsets

def text_to_batch_transformer(text: List, tokenizer: "PreTrainedTokenizer", text_pair: List = None):
    """Turn a piece of text into a batch for a transformer model.

    :param text: the list of texts to tokenize and encode
    :param tokenizer: the tokenizer to use
    :param text_pair: an optional list of second sentences (for sentence-pair
        tasks); must be the same length as *text* when given
    :return: ``(input_ids, masks)`` for single sentences, or
        ``(input_ids, masks, token_type_ids)`` for sentence pairs
    """
    # Older tokenizers expose `max_len`; newer ones renamed it to
    # `model_max_length`.
    max_len = tokenizer.max_len if hasattr(tokenizer, 'max_len') else tokenizer.model_max_length
    if text_pair is None:
        # Bug fix: on recent tokenizers `max_length` is ignored (with a
        # warning) unless truncation is explicitly enabled.
        input_ids = [tokenizer.encode(t, add_special_tokens=True, max_length=max_len,
                                      truncation=True) for t in text]
        masks = [[1] * len(ids) for ids in input_ids]
        return input_ids, masks
    else:
        assert len(text) == len(text_pair)
        items = [tokenizer.encode_plus(s1, text_pair=s2, add_special_tokens=True,
                                       max_length=max_len, truncation=True,
                                       return_length=False, return_attention_mask=True,
                                       return_token_type_ids=True)
                 for s1, s2 in zip(text, text_pair)]
        return ([item['input_ids'] for item in items],
                [item['attention_mask'] for item in items],
                [item['token_type_ids'] for item in items])

def collate_SNLI_batch_with_device(device):
    """Build a DataLoader collate_fn for SNLI-style examples.

    Each example is ``([input_ids], [mask], [segment_ids], label, domain)``.
    Sequences are right-padded with zeros to the longest sequence in the
    batch, and every returned tensor is created on *device*.
    """
    def collate_batch_transformer(input_data: Tuple):
        ids = [example[0][0] for example in input_data]
        attn = [example[1][0] for example in input_data]
        segs = [example[2][0] for example in input_data]
        labels = [example[3] for example in input_data]
        domains = [example[4] for example in input_data]

        width = max(map(len, ids))

        def pad(rows):
            # Right-pad each row with zeros up to the batch width.
            return [row + [0] * (width - len(row)) for row in rows]

        ids, attn, segs = pad(ids), pad(attn), pad(segs)

        assert all(len(row) == width for row in ids)
        assert all(len(row) == width for row in attn)
        assert all(len(row) == width for row in segs)
        return (torch.tensor(ids, device=device),
                torch.tensor(attn, device=device),
                torch.tensor(segs, device=device),
                torch.tensor(labels, device=device),
                torch.tensor(domains, device=device))
    return collate_batch_transformer

def collate_batch_transformer(input_data: Tuple):
    """Collate ``([input_ids], [mask], label, domain)`` examples into tensors.

    Sequences are right-padded with zeros to the longest sequence in the
    batch; tensors are created on the default (CPU) device.
    """
    ids, attn, labels, domains = [], [], [], []
    for example in input_data:
        ids.append(example[0][0])
        attn.append(example[1][0])
        labels.append(example[2])
        domains.append(example[3])

    width = max(len(seq) for seq in ids)
    ids = [seq + [0] * (width - len(seq)) for seq in ids]
    attn = [seq + [0] * (width - len(seq)) for seq in attn]

    assert all(len(seq) == width for seq in ids)
    assert all(len(seq) == width for seq in attn)
    return (torch.tensor(ids), torch.tensor(attn),
            torch.tensor(labels), torch.tensor(domains))

def collate_batch_transformer_with_device(device):
    """Return a collate_fn placing its tensors on *device*.

    Each example is ``([input_ids], [mask], label, domain)``; sequences are
    right-padded with zeros to the batch's longest sequence.
    """
    def collate_batch_transformer(input_data: Tuple):
        ids = [example[0][0] for example in input_data]
        attn = [example[1][0] for example in input_data]
        labels = [example[2] for example in input_data]
        domains = [example[3] for example in input_data]

        width = max(map(len, ids))

        def pad(rows):
            # Right-pad each row with zeros up to the batch width.
            return [row + [0] * (width - len(row)) for row in rows]

        ids, attn = pad(ids), pad(attn)

        assert all(len(row) == width for row in ids)
        assert all(len(row) == width for row in attn)
        return (torch.tensor(ids, device=device),
                torch.tensor(attn, device=device),
                torch.tensor(labels, device=device),
                torch.tensor(domains, device=device))
    return collate_batch_transformer

def collate_batch_transformer_with_index(input_data: Tuple):
    """Collate like ``collate_batch_transformer`` and append the per-example
    indices (the last element of each example tuple) as a plain list."""
    indices = [example[-1] for example in input_data]
    return collate_batch_transformer(input_data) + (indices,)

def read_nli(jsonl_path: AnyStr):
    """Read an SNLI/MultiNLI-style ``.jsonl`` file into a list of example dicts.

    Lines whose gold label is ``'-'`` (no annotator consensus) are skipped.

    :param jsonl_path: path to the ``.jsonl`` file to read
    :return: a list of dicts with keys ``'sent1'``, ``'sent2'``,
        ``'label'`` (contradiction=0, entailment=1, neutral=2) and
        ``'domain'`` (genre id from ``NLI_domain_map``, or 0 when the line
        has no ``genre`` field, e.g. SNLI)
    """
    label_map = {'contradiction': 0, 'entailment': 1, 'neutral': 2}
    examples = []
    with open(jsonl_path, encoding='utf8', errors='ignore') as f:
        for line in f:
            data = json.loads(line.strip("\n"))
            if data['gold_label'] == '-':
                continue  # no gold consensus; drop the example
            examples.append({'sent1': data['sentence1'],
                             'sent2': data['sentence2'],
                             'label': label_map[data['gold_label']],
                             'domain': NLI_domain_map[data['genre']] if "genre" in data else 0})
    return examples

def transIrregularWord(line):
    """Normalize a raw text line: lowercase it and strip @mentions,
    #hashtags and http(s) URLs.

    :param line: the raw text; may be ``None`` or empty
    :return: the cleaned, lower-cased text (``''`` for falsy input)
    """
    if not line:
        return ''
    # Bug fix: str.lower() returns a new string; the original discarded the
    # result, so lowercasing never happened.
    line = line.lower()
    line = re.sub(r"@[^ \n\t]*", "", line)        # @mentions
    line = re.sub(r"#[^ \n\t]*", "", line)        # #hashtags
    line = re.sub(r"http(.?)://[^ ]*", "", line)  # http:// and https:// URLs
    return line


class ClsDataset(Dataset):
    """
    Minimal classification dataset: a pandas DataFrame of examples plus a
    tokenizer.

    The DataFrame starts empty; callers populate ``self.dataset`` themselves
    (see e.g. ``DataSplit``).
    """
    def __init__(
            self,
            tokenizer: "PreTrainedTokenizer" = None,
    ):
        """
        :param tokenizer: the tokenizer to use (may be None)
        """
        # Note: the original docstring documented parameters (dataset_dir,
        # domains, domain_ids) that this class never had.
        super(ClsDataset, self).__init__()
        self.dataset = pd.DataFrame()  # one row per example
        self.tokenizer = tokenizer

    def set_domain_id(self, domain_id):
        """
        Overrides the domain ID for all data.
        :param domain_id: value written to the 'domain' column of every row
        :return:
        """
        self.dataset['domain'] = domain_id

    def __len__(self):
        # Number of rows currently held in the DataFrame.
        return self.dataset.shape[0]


class NLIDataset(Dataset):
    """
    NLI (premise/hypothesis) dataset with per-example label, confidence,
    entropy and domain bookkeeping.

    All public accessors go through ``read_indexs``, an index array over the
    underlying storage, so domain selections can be expressed without copying
    data.  NOTE: 'entrophy' is a historical misspelling of 'entropy' kept for
    API compatibility.
    """
    def __init__(
            self,
            dataset_path: AnyStr = None,
            domains: List = None,
            tokenizer: "PreTrainedTokenizer" = None,
            domain_ids: List = None,
            max_data_size: int = -1,
            load_data = True
    ):
        """
        :param dataset_path: path of the ``.jsonl`` NLI file to read
        :param domains: domain (genre) names whose ids should be overridden
        :param tokenizer: the tokenizer to use
        :param domain_ids: a list of ids to override the default domain IDs
        :param max_data_size: if > 0, randomly subsample this many examples
        :param load_data: read the file immediately when True
        """
        super(NLIDataset, self).__init__()
        self.tokenizer = tokenizer
        self.dataset_path = dataset_path
        self.max_data_size = max_data_size
        self.domains = domains
        self.domain_ids = domain_ids
        self.premise, self.hypothesis = None, None
        self._label, self._confidence, self._entrophy = None, None, None
        self.read_indexs = None
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.collate_raw_batch = collate_SNLI_batch_with_device(device)
        if load_data:
            self.load_data()

    def load_data(self):
        """Read the jsonl file and populate the per-example storage arrays."""
        dataset = self.obtainDataset(self.dataset_path, data_size=self.max_data_size)
        if self.domain_ids is not None and self.domains is not None:
            for i in range(len(self.domain_ids)):
                # Bug fix: the original chained assignment
                # `dataset[mask][2] = ...` wrote into a temporary copy and
                # silently had no effect; write through .loc on the 'domain'
                # column instead.
                mask = dataset['domain'] == NLI_domain_map[self.domains[i]]
                dataset.loc[mask, 'domain'] = self.domain_ids[i]
        self.premise = dataset['sent1'].values
        self.hypothesis = dataset['sent2'].values
        self._label = dataset['label'].values
        self._confidence = torch.ones(len(self._label))
        self._entrophy = torch.zeros(len(self._label))
        self.domain = dataset['domain'].values
        self.read_indexs = np.arange(len(self.domain))

    @property
    def label(self):
        # Labels of the currently visible (selected) examples.
        return self._label[self.read_indexs]

    def setLabel(self, label, idxs):
        """Overwrite labels; *idxs* are positions into the visible view."""
        indexs = self.read_indexs[idxs]
        self._label[indexs] = label

    @property
    def confidence(self):
        # Confidences of the currently visible examples.
        return self._confidence[self.read_indexs]

    def setConfidence(self, confidence, idxs):
        """Overwrite confidences; *idxs* are positions into the visible view."""
        indexs = self.read_indexs[idxs]
        self._confidence[indexs] = confidence

    @property
    def entrophy(self):
        # Entropies of the currently visible examples.
        return self._entrophy[self.read_indexs]

    def setEntrophy(self, entrophy, idxs):
        """Overwrite entropies; *idxs* are positions into the visible view."""
        indexs = self.read_indexs[idxs]
        self._entrophy[indexs] = entrophy

    def labelTensor(self, device=None):
        """Visible labels as a torch tensor on *device*."""
        return torch.tensor(self._label[self.read_indexs], device=device)

    def __len__(self):
        # Size of the visible view, not of the underlying storage.
        return len(self.read_indexs)

    def domainSelect(self, domain_id):
        """Restrict the visible view to examples with the given domain id."""
        if not domain_id in self.domain:
            print(f"Select domain failed, due to the domain_id {domain_id} is missed in data domains")
        read_indexs = np.arange(len(self.domain))
        # Remember the selection so it can be re-applied after mutation.
        self.valid_domain_id = domain_id
        self.read_indexs = read_indexs[self.domain == domain_id]

    def obtainDataset(self, dataset_path, data_size=-1):
        """Load the jsonl file as a DataFrame, optionally subsampling rows."""
        data = read_nli(dataset_path)
        if data_size > 0:
            return pd.DataFrame(random.sample(data, data_size))
        return pd.DataFrame(data)

    def __getitem__(self, item) -> Tuple:
        # Tokenize lazily; returns (input_ids, mask, seg_ids, label, domain, item).
        idx = self.read_indexs[item]
        input_ids, mask, seg_ids = text_to_batch_transformer([self.premise[idx]],
                                                              self.tokenizer,
                                                              text_pair=[self.hypothesis[idx]])
        return input_ids, mask, seg_ids, self._label[idx], \
                        self.domain[idx], item

class NLIDatasetV2(NLIDataset):
    """
    ``NLIDataset`` variant with deferred loading plus set operations:
    ``Derive`` moves selected examples into a new dataset and ``Merge``
    appends the visible examples of another dataset.
    """
    def __init__(
            self,
            dataset_path: AnyStr = None,
            domains: List = None,
            tokenizer: "PreTrainedTokenizer" = None,
            domain_ids: List = None,
            max_data_size: int = -1,
            load_data=False
    ):
        """
        :param dataset_path: path of the ``.jsonl`` NLI file to read
        :param domains: domain (genre) names whose ids should be overridden
        :param tokenizer: the tokenizer to use
        :param domain_ids: a list of ids to override the default domain IDs
        :param max_data_size: if > 0, randomly subsample this many examples
        :param load_data: unlike the base class this defaults to False; call
            ``load()`` explicitly when needed
        """
        super(NLIDatasetV2, self).__init__(dataset_path, domains, tokenizer,
                                                domain_ids, max_data_size, load_data)

    def load(self):
        """Read the jsonl file and populate the per-example storage arrays."""
        dataset = self.obtainDataset(self.dataset_path, data_size=self.max_data_size)
        if self.domain_ids is not None and self.domains is not None:
            for i in range(len(self.domain_ids)):
                # Bug fix: the original chained assignment
                # `dataset[mask][2] = ...` wrote into a temporary copy and
                # silently had no effect; write through .loc instead.
                mask = dataset['domain'] == NLI_domain_map[self.domains[i]]
                dataset.loc[mask, 'domain'] = self.domain_ids[i]
        self.premise = dataset['sent1'].values
        self.hypothesis = dataset['sent2'].values
        self._label = dataset['label'].values
        self._confidence = torch.ones(len(self._label))
        self._entrophy = torch.zeros(len(self._label))
        self.domain = dataset['domain'].values
        self.read_indexs = np.arange(len(self.domain))

    def Derive(self, idxs:List):
        """Remove the examples at visible positions *idxs* from this dataset
        and return them as a new dataset of the same class.

        :param idxs: positions into the current visible view
        :return: a new dataset holding copies of the removed examples
        """
        new_set = self.__class__(None, self.domains, self.tokenizer, self.domain_ids, -1)
        # Translate view positions into storage indices.
        real_idxs = [self.read_indexs[idx] for idx in idxs]
        new_set.read_indexs = np.arange(len(real_idxs))
        # Bug fix: index the underlying storage directly.  The original went
        # through the `label`/`confidence`/`entrophy` properties, which apply
        # `read_indexs` a second time and select wrong rows (or go out of
        # range) while a domain selection is active — the sibling premise/
        # hypothesis lines below already indexed storage directly.
        new_set.initLabel(self._label[real_idxs].copy())
        new_set.initConfidence(self._confidence[real_idxs].copy())
        new_set.initEntrophy(self._entrophy[real_idxs].copy())

        new_set.premise = self.premise[real_idxs].copy()
        new_set.hypothesis = self.hypothesis[real_idxs].copy()
        new_set.domain = self.domain[real_idxs]

        self.premise, self.hypothesis, self.domain = np.delete(self.premise, real_idxs), \
                                                        np.delete(self.hypothesis, real_idxs), \
                                                            np.delete(self.domain, real_idxs)
        self._label, self._confidence, self._entrophy = np.delete(self._label, real_idxs), \
                                                            np.delete(self._confidence, real_idxs), \
                                                             np.delete(self._entrophy, real_idxs)
        self.read_indexs = np.arange(len(self.domain))
        if hasattr(self, 'valid_domain_id'):
            # Re-apply the active domain selection over the shrunken storage.
            self.domainSelect(self.valid_domain_id)
        return new_set

    def Merge(self, another_set:NLIDataset):
        """Append the *visible* examples of *another_set* to this dataset's
        storage and extend the visible view to cover them.

        :param another_set: dataset whose visible rows are appended
        """
        another_premise = another_set.premise[another_set.read_indexs]
        another_hypothesis = another_set.hypothesis[another_set.read_indexs]
        another_domain = another_set.domain[another_set.read_indexs]
        # Storage size before appending: the new rows will occupy positions
        # [old_size, old_size + len(another_set)).
        old_size = len(self._label)
        self.premise, self.hypothesis, self.domain = np.concatenate([self.premise, another_premise]), \
                                                        np.concatenate([self.hypothesis, another_hypothesis]), \
                                                            np.concatenate([self.domain, another_domain])
        self._label, self._confidence, self._entrophy = np.concatenate([self._label, another_set.label]), \
                                                            np.concatenate([self._confidence, another_set.confidence]), \
                                                             np.concatenate([self._entrophy, another_set.entrophy])
        # Bug fix: the original computed this range from the length *after*
        # concatenation, producing read indices past the end of storage.
        extend_idxs = np.arange(old_size, old_size + len(another_set), 1)
        self.read_indexs = np.concatenate(
            [self.read_indexs, extend_idxs]
        )

    def initLabel(self, label):
        """Set the label storage; must match the visible view's length."""
        assert len(self.read_indexs) == len(label)
        self._label = label

    def initConfidence(self, confidence):
        """Set the confidence storage; must match the visible view's length."""
        assert len(self.read_indexs) == len(confidence)
        self._confidence = confidence

    def initEntrophy(self, entrophy):
        """Set the entropy storage ('entrophy' kept for API compatibility)."""
        assert len(self.read_indexs) == len(entrophy)
        self._entrophy = entrophy
