from torch.utils.data import Dataset
import random
import os
import pickle
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import torch.nn as nn
import torch
import sys
import re
sys.path.append("..")
from pytorch_transformers import BertTokenizer
from nltk.stem import WordNetLemmatizer
import nltk

class adict(dict):
    """Attribute dictionary: a dict whose items are also readable and
    writable as attributes (similar to types.SimpleNamespace, but it is
    a real dict and supports both access styles interchangeably).
    """

    def __init__(self, *args, **kwargs):
        super(adict, self).__init__(*args, **kwargs)
        # Alias the attribute namespace to the mapping itself so that
        # obj.key and obj['key'] always stay in sync.
        self.__dict__ = self

def Lemma_Factory():
    """Build and return a lemmatizing closure.

    The returned function POS-tags its token list with nltk and
    lemmatizes each token using the WordNet POS class derived from the
    first letter of the Penn tag (J -> adjective, V -> verb, N -> noun,
    R -> adverb); tokens with any other tag pass through unchanged.
    """
    lemmatizer = WordNetLemmatizer()
    tag_to_wn_pos = {'J': 'a', 'V': 'v', 'N': 'n', 'R': 'r'}

    def lemma(word_tokens):
        lemmas = []
        for word, tag in nltk.pos_tag(word_tokens):
            wn_pos = tag_to_wn_pos.get(tag[:1])
            lemmas.append(lemmatizer.lemmatize(word, wn_pos) if wn_pos else word)
        return lemmas

    return lemma

def Merge_data(data_set1, data_set2):
    """Return a new dataset of the same class containing both inputs.

    The per-event dicts are merged (second set wins on duplicate IDs)
    and the three parallel lists (IDs, lengths, labels) are concatenated
    in order. Neither input is modified.
    """
    def _cat(first, second):
        # Concatenate two parallel lists via numpy, back to plain lists.
        return np.concatenate([np.array(first), np.array(second)]).tolist()

    merged = data_set1.__class__()
    merged.data = dict(data_set1.data, **data_set2.data)
    merged.data_ID = _cat(data_set1.data_ID, data_set2.data_ID)
    merged.data_len = _cat(data_set1.data_len, data_set2.data_len)
    merged.data_y = _cat(data_set1.data_y, data_set2.data_y)
    return merged

def Sample_data(dataset, idx_list):
    """Return a new dataset of the same class restricted to positions
    ``idx_list`` (order preserved).

    The parallel ID/length/label lists are subsetted; the event dict is
    shared with the original (no copy).
    """
    subset = dataset.__class__()
    for attr in ("data_ID", "data_len", "data_y"):
        full = getattr(dataset, attr)
        setattr(subset, attr, [full[idx] for idx in idx_list])
    subset.data = dataset.data  # shared by reference, as before
    return subset

def Sort_data(tr_set, dev_set, te_set):
    """Re-distribute events across the three splits by source post time.

    All events are pooled, sorted ascending by the creation time of their
    source (first) post, ``data[ID]['created_at'][0]``, and dealt back
    out: the earliest ``len(tr_set)`` events become the train set, the
    next ``len(dev_set)`` the dev set, and the remainder the test set.
    The three inputs are modified in place and also returned.

    Bug fix: the test slice used to be ``idxs[-dev_len:]`` (the last
    *dev_len* indices) instead of the last ``te_len`` indices, producing
    a wrong test split whenever the dev and test sizes differed.
    """
    splits = (tr_set, dev_set, te_set)
    pooled = dict(dict(tr_set.data, **dev_set.data), **te_set.data)
    all_IDs = np.concatenate([np.array(s.data_ID) for s in splits]).tolist()
    all_l = np.concatenate([np.array(s.data_len) for s in splits]).tolist()
    all_y = np.concatenate([np.array(s.data_y) for s in splits]).tolist()
    tr_len, dev_len = len(tr_set), len(dev_set)

    # Creation timestamp of each event's source post, in pooled order.
    source_created_at = np.array([s.data[ID]['created_at'][0]
                                  for s in splits for ID in s.data_ID])
    order = source_created_at.argsort()
    slices = (order[:tr_len],
              order[tr_len:tr_len + dev_len],
              order[tr_len + dev_len:])  # fixed: was idxs[-dev_len:]
    for subset, sl in zip(splits, slices):
        subset.data_ID = [all_IDs[i] for i in sl]
        subset.data_len = [all_l[i] for i in sl]
        subset.data_y = [all_y[i] for i in sl]
        subset.data = {ID: pooled[ID] for ID in subset.data_ID}
    return tr_set, dev_set, te_set

def shuffle_data(tr_set, dev_set, te_set):
    """Randomly re-distribute events across the three splits.

    Pools all events, draws a random permutation, and reassigns the
    first ``tr_len`` entries to train, the next ``dev_len`` to dev and
    the remaining ``te_len`` to test, keeping ID/length/label alignment.
    The inputs are modified in place and also returned.

    Bug fix: the test slice used to be ``idxs[-dev_len:]`` (the last
    *dev_len* indices) instead of the last ``te_len`` indices, so the
    test split was wrong whenever dev and test sizes differed.
    """
    splits = (tr_set, dev_set, te_set)
    pooled = dict(dict(tr_set.data, **dev_set.data), **te_set.data)
    all_IDs = np.concatenate([np.array(s.data_ID) for s in splits]).tolist()
    all_l = np.concatenate([np.array(s.data_len) for s in splits]).tolist()
    all_y = np.concatenate([np.array(s.data_y) for s in splits]).tolist()
    tr_len, dev_len, te_len = len(tr_set), len(dev_set), len(te_set)

    total = tr_len + dev_len + te_len
    order = random.sample(range(total), total)  # random permutation
    slices = (order[:tr_len],
              order[tr_len:tr_len + dev_len],
              order[tr_len + dev_len:])  # fixed: was idxs[-dev_len:]
    for subset, sl in zip(splits, slices):
        subset.data_ID = [all_IDs[i] for i in sl]
        subset.data_len = [all_l[i] for i in sl]
        subset.data_y = [all_y[i] for i in sl]
        subset.data = {ID: pooled[ID] for ID in subset.data_ID}
    return tr_set, dev_set, te_set

def mask_tokens(inputs, tokenizer, mlm_probability):
    """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """

    labels = inputs.clone()
    # We sample a few tokens in each sequence for masked-LM training (with probability mlm_probability defaults to 0.15 in Bert/RoBERTa)
    masked_indices = torch.bernoulli(torch.full(labels.shape, mlm_probability)).bool()
    labels[~masked_indices] = -1  # We only compute loss on masked tokens
    # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    # 10% of the time, we replace masked input tokens with random word
    indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    inputs[indices_random] = random_words[indices_random]
    # The rest of the time (10% of the time) we keep the masked input tokens unchanged
    return inputs, labels

class Tree(object):
    """Rooted tree of node indices, built breadth-first from
    (parent_idx, child_idx) edges.

    Derived quantities (size, depth, node list, leaf indices) are
    memoized; the tree is assumed immutable once constructed.
    """

    def __init__(self, root_idx=0, edges=None):
        self.parent = None          # parent Tree, set by add_child
        self.root_idx = root_idx    # index/id carried by this node
        self.num_children = 0
        self.children = list()
        # Memoization slots; None means "not computed yet". The previous
        # falsy check (getattr(..., False)) could never cache a value of
        # 0, so depth() of a leaf was recomputed on every call.
        self._size = None
        self._depth = None
        self._nodes = None
        self._leaf_node_idxs = None
        if edges is not None:
            self.Construct(edges)

    def Construct(self, edges):
        """Attach children level by level from (parent_idx, child_idx) edges.

        The adjacency map is built once, making construction O(|edges|)
        instead of rescanning the full edge list per level; sibling order
        still follows the order edges appear in. A cyclic edge list loops
        forever, as in the original level-scan implementation.
        """
        children_of = {}
        for edge in edges:
            children_of.setdefault(edge[0], []).append(edge[1])
        frontier = [self]
        while frontier:
            next_frontier = []
            for node in frontier:
                for child_idx in children_of.get(node.root_idx, []):
                    child = Tree(root_idx=child_idx)
                    node.add_child(child)
                    next_frontier.append(child)
            frontier = next_frontier

    def add_child(self, child):
        """Link ``child`` under this node (sets its parent pointer)."""
        child.parent = self
        self.num_children += 1
        self.children.append(child)

    def size(self):
        """Number of nodes in this subtree, including self (memoized)."""
        if self._size is None:
            self._size = 1 + sum(child.size() for child in self.children)
        return self._size

    def depth(self):
        """Edges on the longest root-to-leaf path; 0 for a leaf (memoized)."""
        if self._depth is None:
            self._depth = 1 + max(child.depth() for child in self.children) if self.children else 0
        return self._depth

    def nodes(self):
        """All nodes in pre-order: self first, then each child's subtree (memoized)."""
        if self._nodes is None:
            collected = [self]
            for child in self.children:
                collected.extend(child.nodes())
            self._nodes = collected
        return self._nodes

    def leaf_node_idxs(self):
        """``root_idx`` of every leaf in this subtree, left-to-right (memoized)."""
        if self._leaf_node_idxs is None:
            if self.children:
                self._leaf_node_idxs = [idx for child in self.children
                                        for idx in child.leaf_node_idxs()]
            else:
                self._leaf_node_idxs = [self.root_idx]
        return self._leaf_node_idxs

class Text2TFIDF(object):
    """Convert a raw text corpus to TF-IDF matrices via scikit-learn."""

    def __init__(self):
        self.vectorizer = CountVectorizer()
        self.transf = TfidfTransformer()

    def text2vector(self, corpus):
        """Fit on the corpus; return (dense count matrix, vocabulary list)."""
        counts = self.vectorizer.fit_transform(corpus)
        vocab = self.vectorizer.get_feature_names()
        return counts.toarray(), vocab

    def vector2tfidf(self, vectors):
        """Fit the TF-IDF transform on count vectors; returns a sparse matrix."""
        return self.transf.fit_transform(vectors)

    def text2tfidf(self, corpus):
        """Full pipeline: corpus -> (dense TF-IDF matrix, vocabulary list)."""
        counts, vocab = self.text2vector(corpus)
        return self.vector2tfidf(counts).toarray(), vocab

class BertEmbedding(object):
    def __init__(self, bert_dir, emb_file):
        self.tokenizer = BertTokenizer.from_pretrained(bert_dir)
        self.emb = nn.Embedding(len(self.tokenizer), 768)
        if os.path.exists(emb_file):
            ch = torch.load(emb_file)
            self.emb.load_state_dict(ch['embeddings'])
        else:
            print("embedding_file %s not exists!" % emb_file)
            sys.exit(0)

    def sentence2wordvecs(self, sent):
        text_inputs = torch.tensor(self.tokenizer.encode(sent, add_special_tokens=True))
        with torch.no_grad():
            embeddings = [self.emb(ipt_ids) for ipt_ids in text_inputs]
        return embeddings

class w2v_model(object):
    """English word2vec lookup built from a pickled word->vector mapping."""

    def __init__(self, w2v_file="./word2vec.txt"):
        super(w2v_model, self).__init__()
        with open(w2v_file, "rb") as handle:
            self.word2vec = pickle.load(handle)

    def lookup(self, word):
        """Return the vector for ``word``; a missing word falls back to the
        element-wise sum of the vectors for '{', 'unknown', 'word', '}'."""
        try:
            return self.word2vec[word]
        except KeyError:
            return (self.word2vec['{'] + self.word2vec['unknown']
                    + self.word2vec["word"] + self.word2vec['}'])

    def words_list2words_tensor_EN(self, w_list):
        """Stack the per-word vectors of ``w_list`` into one numpy array."""
        return np.stack(list(map(self.lookup, w_list)))

    def _unknown_vec(self):
        # OOV fallback used by wordlist2wordvecs: sum of '{ an unknown word }'.
        return (self.word2vec['{'] + self.word2vec['an'] + self.word2vec['unknown']
                + self.word2vec['word'] + self.word2vec['}'])

    def sentence2wordlist(self, sentence):
        """Split on single spaces; tokens from '{' to '}' (inclusive) are
        grouped into one sublist. An unterminated '{' raises IndexError,
        matching the original behavior."""
        tokens = sentence.split(' ')
        grouped = []
        i = 0
        while i < len(tokens):
            if tokens[i] != '{':
                grouped.append(tokens[i])
                i += 1
                continue
            span = []
            while tokens[i] != '}':
                span.append(tokens[i])
                i += 1
            span.append(tokens[i])  # the closing '}'
            i += 1
            grouped.append(span)
        return grouped

    def wordlist2wordvecs(self, wordlist):
        """Vectorize the output of ``sentence2wordlist``.

        Strings are stripped of leading/trailing punctuation and looked
        up; a sublist ('{...}' span) becomes the element-wise sum of its
        members' vectors. Entries that are neither str nor list are
        silently skipped, as before.
        """
        vecs = []
        for entry in wordlist:
            if isinstance(entry, str):
                stripped = re.sub(r"^[, . ? ! :]*|[, . ? ! :]*$", "", entry)
                if stripped in self.word2vec.vocab:
                    vecs.append(self.word2vec[stripped])
                else:
                    vecs.append(self._unknown_vec())
            elif isinstance(entry, list):
                total = (self.word2vec[entry[0]] if entry[0] in self.word2vec.vocab
                         else self._unknown_vec())
                for w in entry[1:]:
                    total = total + (self.word2vec[w] if w in self.word2vec.vocab
                                     else self._unknown_vec())
                vecs.append(total)
        return vecs

    def sentence2wordvecs(self, sentence):
        """Sentence -> grouped token list -> list of vectors."""
        return self.wordlist2wordvecs(self.sentence2wordlist(sentence))

class w2v_model_CN(object):
    """Chinese word2vec lookup loaded from a fixed pickle file.

    NOTE(review): ``self.word2vec`` is assumed to behave like a gensim
    KeyedVectors object (supports ``item in .vocab``, ``[]`` lookup, and
    300-dim float32 vectors) — confirm against the pickled file.
    """
    def __init__(self):
        # Hard-coded vector file; raises FileNotFoundError if absent.
        with open("./word2vec_CN_WeiboBi.pkl", "rb") as handle:
            self.word2vec = pickle.load(handle)
        print("load word2vec finished")

    def lookup(self, item):
        """Return the vector for ``item``; an out-of-vocabulary item falls
        back to the element-wise sum of the vectors for '{', '未知', '词', '}'."""
        if item in self.word2vec.vocab:
            vec = self.word2vec[item]
        else:
            vec = self.word2vec['{'] + self.word2vec['未知'] + self.word2vec['词'] + self.word2vec['}']
        return vec

    def words_list2words_tensor(self, w_list):
        """Stack the per-word vectors of ``w_list`` into one numpy array."""
        return np.stack([self.lookup(word) for word in w_list])

    def wordlist2wordvecs(self, wordlist):
        """Vectorize the output of ``sentence2wordlist``.

        A plain string becomes its vector (after stripping whitespace;
        empty strings are skipped). A sublist (a '[...]' span) becomes
        the element-wise sum of its non-empty members' vectors. Any other
        entry type aborts the process via sys.exit.
        """
        vecs = []
        for item in wordlist:
            if isinstance(item, str):
                item = item.strip()
                if item != '':
                    vec = self.lookup(item)
                    vecs.append(vec)
            elif isinstance(item, list):
                # Sum the vectors of all non-empty members of the span.
                vec = np.zeros(300, dtype=np.float32)
                for w in item:
                    w = w.strip()
                    if w !='':
                        vec = vec + self.lookup(w)
                vecs.append(vec)
            else:
                print("error! word list needs to be checked!")
                print("wordlist:", wordlist)
                sys.exit(0)
        return vecs

    def sentence2wordlist(self, sentence):
        """Walk ``sentence`` item by item (characters of a str, or elements
        of a token sequence) and group '[' ... ']' spans into sublists.

        The try/except/else around the inner scan handles three cases:
          * another '[' inside a span flushes the partial span into ``l``
            flat and restarts the span (the opening '[' itself triggers a
            harmless flush of the empty buffer on the first iteration);
          * an unterminated '[' (IndexError at end of input) flushes the
            partial span flat;
          * a properly closed span is appended as one sublist including
            both brackets.
        """
        l = []
        word_idx = 0
        while word_idx < len(sentence):
            if sentence[word_idx] == '[':
                tmp = []
                try:
                    while sentence[word_idx] != ']':
                        if sentence[word_idx] == '[':
                            # Flush the partial span flat and restart it.
                            l.extend(tmp)
                            tmp = []
                        tmp.append(sentence[word_idx])
                        word_idx += 1
                except IndexError:
                    # Ran off the end without a closing ']'.
                    l.extend(tmp)
                else:
                    tmp.append(sentence[word_idx])
                    word_idx += 1
                    l.append(tmp)
            else:
                l.append(sentence[word_idx])
                word_idx += 1
        return l

class RandomSampler(object):
    """Draw random batches from a dataset (without replacement per draw)
    and collate them with a user-supplied function."""

    def __init__(self, data: Dataset, collate_fn):
        super(RandomSampler, self).__init__()
        self.collate_fn = collate_fn
        self.data = data
        self.index = list(range(len(data)))

    def sample(self, batch_size):
        """Return one collated batch of ``batch_size`` random examples."""
        chosen = random.sample(self.index, batch_size)
        return self.collate_fn([self.data[idx] for idx in chosen])

class RumorLoader(Dataset):
    """Base torch Dataset holding rumor events in parallel containers.

    Attributes:
        data:     dict, event ID -> raw event record.
        data_ID:  ordered list of event IDs.
        data_len: per-event sequence length, aligned with data_ID.
        data_y:   per-event label, aligned with data_ID.
        sample_len: fixed resampling length set via ResortSample; -1 = off.

    ``__getitem__`` and ``collate_raw_batch`` are hooks for subclasses.
    """

    def __init__(self):
        super(RumorLoader, self).__init__()
        self.data = {}
        self.data_ID = []
        self.data_len = []
        self.data_y = []
        self.sample_len = -1

    def split(self, percent=(0.5, 1.0)):
        """Randomly partition into len(percent) subsets.

        ``percent`` holds cumulative fractions (e.g. (0.5, 1.0) -> two
        halves). Returns a list of new instances; event records are
        copied by reference.
        """
        data_size = len(self.data_ID)
        cnt = [int(p * data_size) for p in percent]
        cnt.insert(0, 0)
        order = random.sample(range(data_size), data_size)
        rst = [self.__class__() for _ in percent]
        for i in range(len(percent)):
            chosen = order[cnt[i]:cnt[i + 1]]
            rst[i].data_ID = [self.data_ID[idx] for idx in chosen]
            rst[i].data_len = [self.data_len[idx] for idx in chosen]
            rst[i].data_y = [self.data_y[idx] for idx in chosen]
            rst[i].data = {ID: self.data[ID] for ID in rst[i].data_ID}
        return rst

    def select(self, idxs):
        """Return a new instance restricted to positions ``idxs``.

        Bug fix: the data dict is now keyed by the *selected* IDs only;
        previously it copied the record of every ID in the parent loader
        (``for ID in self.data_ID``), which defeated the selection and
        wasted memory. Now consistent with split/BalancedSplit.
        """
        obj = self.__class__()
        obj.data_ID = [self.data_ID[idx] for idx in idxs]
        obj.data_len = [self.data_len[idx] for idx in idxs]
        obj.data_y = [self.data_y[idx] for idx in idxs]
        obj.data = {ID: self.data[ID] for ID in obj.data_ID}
        return obj

    def BalancedSplit(self, percent=(0.5, 1.0)):
        """Split like ``split`` but force the FIRST subset to be class-balanced.

        ``percent`` entries are cumulative fractions when all <= 1.0,
        otherwise absolute counts. Labels are the argmax of data_y rows
        (binary: 1 = positive, 0 = negative).
        """
        data_size = len(self.data_ID)
        cnt_list = [int(p * data_size) for p in percent] if max(percent) <= 1.0 else list(percent)

        indices = torch.arange(data_size)
        label = torch.tensor(self.data_y).argmax(dim=1)
        pos_idxs = indices[label == 1].tolist()
        neg_idxs = indices[label == 0].tolist()
        random.shuffle(pos_idxs)
        random.shuffle(neg_idxs)
        # First subset: half positives, the rest negatives (rounded).
        half = cnt_list[0] // 2
        idxs_list = [pos_idxs[:half] + neg_idxs[:cnt_list[0] - half]]
        rest_idxs = pos_idxs[half:] + neg_idxs[cnt_list[0] - half:]
        random.shuffle(rest_idxs)
        # Shift remaining cumulative counts past the balanced subset.
        cnt_list = [cnt - len(idxs_list[0]) for cnt in cnt_list]
        for i in range(len(percent) - 1):
            idxs_list.append(rest_idxs[cnt_list[i]:cnt_list[i + 1]])
        rst = [self.__class__() for _ in percent]
        for i in range(len(percent)):
            rst[i].data_ID = [self.data_ID[idx] for idx in idxs_list[i]]
            rst[i].data_len = [self.data_len[idx] for idx in idxs_list[i]]
            rst[i].data_y = [self.data_y[idx] for idx in idxs_list[i]]
            rst[i].data = {ID: self.data[ID] for ID in rst[i].data_ID}
        return rst

    def Caches_Data(self, data_prefix="../data/data"):
        """Persist the four containers under ``data_prefix`` (pickle + .npy)."""
        with open("%s_dict.txt" % data_prefix, "wb") as fw:
            pickle.dump(self.data, fw, protocol=pickle.HIGHEST_PROTOCOL)
        np.save("%s_ID.npy" % data_prefix, np.array(self.data_ID))
        np.save("%s_y.npy" % data_prefix, np.array(self.data_y))
        np.save("%s_len.npy" % data_prefix, np.array(self.data_len))

    def load_data_fast(self, data_prefix="../data/train", min_len=-1):
        """Load the containers cached by Caches_Data; optionally drop events
        not longer than ``min_len``. Raises AssertionError on missing files."""
        dic_file = "%s_dict.txt" % data_prefix
        id_npy = "%s_ID.npy" % data_prefix
        len_npy = "%s_len.npy" % data_prefix
        y_npy = "%s_y.npy" % data_prefix
        assert os.path.exists(dic_file) and os.path.exists(id_npy) \
            and os.path.exists(len_npy) and os.path.exists(y_npy)
        with open(dic_file, "rb") as handle:
            self.data = pickle.load(handle)
        self.data_ID = np.load(id_npy).tolist()
        self.data_len = np.load(len_npy).tolist()
        self.data_y = np.load(y_npy).tolist()
        if min_len > 0:
            self.filter_short_seq(min_len)
        print("load len: ", len(self.data_ID))

    def filter_short_seq(self, min_len):
        """Keep only events whose length is strictly greater than ``min_len``
        (the data dict itself is left untouched, as before)."""
        keep = [idx for idx, l in enumerate(self.data_len) if l > min_len]
        self.data_ID = [self.data_ID[idx] for idx in keep]
        self.data_len = [self.data_len[idx] for idx in keep]
        self.data_y = [self.data_y[idx] for idx in keep]

    def trim_long_seq(self, max_len):
        """Clamp every recorded length to at most ``max_len``."""
        self.data_len = [min(l, max_len) for l in self.data_len]

    def ResortSample(self, sample_len=-1):
        """Set the fixed sample length; refuse values below the shortest event.

        Fix: the original ``assert`` inside try/except was stripped under
        ``python -O`` (silently accepting any length); an explicit check
        keeps the same best-effort behavior (including an empty data_len)
        regardless of interpreter flags.
        """
        if self.data_len and sample_len >= min(self.data_len):
            self.sample_len = sample_len
        else:
            print("Failed to set the sample_len")

    def __len__(self):
        return len(self.data_ID)

    def __getitem__(self, index):
        """Abstract hook: subclasses return one training example."""
        pass

    def collate_raw_batch(self, batch):
        """Abstract hook: subclasses merge a list of examples into a batch."""
        pass

    def InnerBatch(self, batchsize):
        """Draw ``batchsize`` random examples and collate them."""
        idxs = random.sample(range(len(self.data_ID)), batchsize)
        return self.collate_raw_batch([self[idx] for idx in idxs])