import os
import time
import datetime
import numpy as np
import random
import math
import re
import pdb
import pickle
import torch
import json
import sys
import pandas as pd
from tqdm import tqdm
import pkuseg
from .dataloader_utils import Tree
from torch.utils.data import Dataset
from .dataloader_utils import RumorLoader, Text2TFIDF
import dgl
import nltk
from nltk import WordNetLemmatizer

# PHEME dataset: event (news story) name -> integer topic label used by the loaders below.
event_dics = {
    'charliehebdo': 0,
    'ferguson': 1,
    'germanwings-crash': 2,
    'ottawashooting': 3,
    'sydneysiege': 4
}

def get_curtime():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

def str2timestamp(str_time):
    """Convert a Twitter 'created_at' string to a Unix timestamp.

    Example input: 'Wed Jan 07 11:14:08 +0000 2015'. The '+0000' offset is
    ignored and the wall-clock time is interpreted in the *local* timezone
    via time.mktime, matching the original behavior.
    """
    month = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04',
             'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08',
             'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
    parts = str_time.split(' ')
    # Reassemble as 'YYYY-MM-DD HH:MM:SS' so strptime can parse it.
    iso_like = "{}-{}-{} {}".format(parts[5], month[parts[1]], parts[2], parts[3])
    parsed = datetime.datetime.strptime(iso_like, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(parsed.timetuple()))

def sortTempList(temp_list):
    """Sort a list of [timestamp, post] pairs by ascending timestamp.

    Returns a new list of [timestamp, post] pairs; elements are numpy
    scalars because the sort goes through numpy arrays, as before.
    """
    stamps = np.array([pair[0] for pair in temp_list])
    contents = np.array([pair[1] for pair in temp_list])
    order = stamps.argsort().tolist()
    return [[stamps[k], contents[k]] for k in order]

class SentiReader(Dataset):
    """Sentiment dataset over a CSV file with 'content' and 'label' columns.

    Rows with missing values are dropped; texts are normalized by lineClear.
    """

    def __init__(self, data_csv_file):
        df = pd.read_csv(data_csv_file).dropna()
        self.sents = [self.lineClear(text) for text in df['content'].values.tolist()]
        self.label = list(df['label'].values.tolist())
        self.idxs = list(range(len(self.label)))

    def lineClear(self, line):
        """Lowercase the line and replace mentions/hashtags/URLs with placeholders."""
        cleaned = line.lower().strip("\t").strip("\n")
        cleaned = re.sub("@[^ :]*", " @ ", cleaned)
        cleaned = re.sub("#[^# ]*#", " # ", cleaned)
        cleaned = re.sub("http(.?)://[^ ]*", " url ", cleaned)
        return cleaned

    def collate_raw_batch(self, batch):
        """Collate (sentence, label) pairs into (sentence list, label tensor)."""
        texts, labels = [], []
        for text, lab in batch:
            texts.append(text)
            labels.append(lab)
        return texts, torch.tensor(labels)

    def sample_batch(self, batch_size):
        """Draw a random batch (without replacement) and collate it."""
        chosen = random.sample(self.idxs, batch_size)
        return self.collate_raw_batch([self[i] for i in chosen])

    def __getitem__(self, index):
        return self.sents[index], self.label[index]

    def __len__(self):
        return len(self.sents)

class TopicReader(Dataset):
    """Dataset of tweet texts labeled with their PHEME event (topic) index.

    Reads a CSV with 'content' and 'event' columns; event names are mapped
    to integers through the module-level event_dics table.
    """

    def __init__(self, data_csv_file):
        df = pd.read_csv(data_csv_file)
        self.sents = [self.lineClear(text) for text in df['content'].values.tolist()]
        self.label = [event_dics[name] for name in df['event'].values.tolist()]

    def lineClear(self, line):
        """Lowercase the line and replace mentions/hashtags/URLs with placeholders."""
        cleaned = line.lower().strip("\t").strip("\n")
        cleaned = re.sub("@[^ :]*", " @ ", cleaned)
        cleaned = re.sub("#[^# ]*#", " # ", cleaned)
        cleaned = re.sub("http(.?)://[^ ]*", " url ", cleaned)
        return cleaned

    def collate_raw_batch(self, batch):
        """Collate (sentence, topic-label) pairs into (sentence list, label tensor)."""
        texts, labels = [], []
        for text, lab in batch:
            texts.append(text)
            labels.append(lab)
        return texts, torch.tensor(labels)

    def __getitem__(self, index):
        return self.sents[index], self.label[index]

    def __len__(self):
        return len(self.sents)

class LMReader(Dataset):
    """Language-model corpus: one training example per line of a text file."""

    def __init__(self, data_file):
        super(LMReader, self).__init__()
        # Keep each line verbatim, minus its trailing newline.
        with open(data_file) as fh:
            self.content = [row.strip("\n") for row in fh]

    def __getitem__(self, index):
        return self.content[index]

    def __len__(self):
        return len(self.content)

class AirlineSenti(Dataset):
    """US-airline tweet sentiment dataset (negative/neutral/positive).

    Data is attached lazily via load_data(); split() partitions the loaded
    arrays into fresh AirlineSenti instances.
    """

    def __init__(self, batchsize=20, shuffle=True):
        super(AirlineSenti, self).__init__()
        self.dic = {'negative':0, 'neutral':1, 'positive':2}
        userdic = [':)', '[', ']', '。', ',', '，', '{', '}', '(', ')', '!', '！', '~', '～',
                   '"', ':', '+', '-', '$', '.', '?', '<', '>', '|', '=', '...']
        self.seg = pkuseg.pkuseg(user_dict=userdic)
        self.batchsize = batchsize
        self.shuffle = shuffle
        self.label = np.array([])
        self.label_confidence = np.array([])
        self.words = np.array([])

    def load_data(self, data_file):
        """Load texts, sentiment labels and label confidences from the CSV."""
        df = pd.read_csv(data_file)
        self.label = np.array([self.dic[label] for label in df['airline_sentiment'].values.tolist()])
        self.label_confidence = df['airline_sentiment_confidence'].values
        self.words = df['text'].values

    def text2words(self, line, seg=None):
        """Normalize and tokenize a line.

        Returns '' for empty input; seg.cut(...) tokens when a segmenter is
        given, otherwise whitespace-split tokens.
        """
        if not line:
            return ''
        # BUG FIX: str.lower() returns a new string; the original discarded it,
        # so tokenization was case-sensitive.
        line = line.lower()
        line = self.clear_line(line)
        if seg is not None:
            return seg.cut(line)
        return line.split()

    def clear_line(self, line):
        """Replace mentions/hashtags/URLs with placeholder tokens."""
        line = re.sub("@[^ \n\t]*", "@ ", line)
        line = re.sub("#[^ \n\t]*", "# ", line)
        line = re.sub("http(.?)://[^ ]*", "url ", line)
        return line

    def sample(self, batchsize=-1):
        """Randomly sample a batch of (words, label, word count, confidence).

        NOTE(review): self.idxs and self.words_num are never assigned in this
        class — confirm a caller populates them before sample() is used.
        """
        batchsize = self.batchsize if batchsize == -1 else batchsize
        idxs = random.sample(self.idxs.tolist(), batchsize)
        return self.words[idxs], torch.tensor(self.label[idxs]), torch.tensor(self.words_num[idxs]), torch.tensor(self.label_confidence[idxs], dtype=torch.float32)

    def __len__(self):
        return len(self.words)

    def __getitem__(self, index):
        return self.words[index], self.label[index], self.label_confidence[index]

    def collate_raw_fn(self, batch):
        """Collate into (cleaned raw texts, label tensor, confidence tensor)."""
        words = [self.clear_line(item[0]) for item in batch]
        labels = [item[1] for item in batch]
        confidences = [item[2] for item in batch]
        return words, torch.tensor(labels), torch.tensor(confidences)

    def collate_fn(self, batch):
        """Collate into (token lists, labels, per-item token counts, confidences)."""
        words = [self.text2words(item[0], self.seg) for item in batch]
        labels = [item[1] for item in batch]
        word_nums = [len(sent) for sent in words]
        confidences = [item[2] for item in batch]
        return words, torch.tensor(labels), torch.tensor(word_nums), torch.tensor(confidences)

    def split(self, percent=[0.1, 0.3, 1.0]):
        """Shuffle and partition the data into len(percent) new datasets.

        `percent` holds ascending cumulative fractions; slice i covers
        [percent[i-1], percent[i]) of the shuffled data.
        """
        data_size = len(self.label)
        cnt = [int(item*data_size) for item in percent]
        cnt.insert(0, 0)
        new_idxs = list(random.sample(list(range(data_size)), data_size))
        rst = [AirlineSenti() for _ in percent]
        for i in range(len(percent)):
            rst[i].words = self.words[new_idxs[cnt[i]:cnt[i+1]]]
            rst[i].label = self.label[new_idxs[cnt[i]:cnt[i+1]]]
            rst[i].label_confidence = self.label_confidence[new_idxs[cnt[i]:cnt[i+1]]]
        return rst

class SubReader(Dataset):
    """Dataset over a CSV with 'content' and 'label' columns, tokenized with pkuseg."""

    def __init__(self, data_file, batchsize=20):
        super(SubReader, self).__init__()
        df = pd.read_csv(data_file)
        self.label = df['label'].values
        self.words = df['content'].values
        userdic = [':)', '[', ']', '。', ',', '，', '{', '}', '(', ')', '!', '！', '~', '～',
                   '"', ':', '+', '-', '$', '.', '?', '<', '>', '|', '=', '...']
        self.seg = pkuseg.pkuseg(user_dict=userdic)
        self.batchsize = batchsize

    def sample(self, batchsize=-1):
        """Randomly sample a batch of (words, labels, word counts).

        NOTE(review): self.idxs and self.words_num are never assigned in this
        class — confirm a caller populates them before sample() is used.
        """
        batchsize = self.batchsize if batchsize == -1 else batchsize
        idxs = random.sample(self.idxs.tolist(), batchsize)
        return self.words[idxs], torch.tensor(self.label[idxs]), torch.tensor(self.words_num[idxs])

    def __len__(self):
        return len(self.words)

    def __getitem__(self, index):
        return self.words[index], torch.tensor(self.label[index])

    def text2words(self, line, seg=None):
        """Normalize and tokenize a line ('' for empty input)."""
        if not line:
            return ''
        # BUG FIX: str.lower() returns a new string; the original discarded it,
        # so tokenization was case-sensitive.
        line = line.lower()
        line = self.clear_line(line)
        if seg is not None:
            return seg.cut(line)
        return line.split()

    def clear_line(self, line):
        """Replace mentions/hashtags/URLs with placeholder tokens."""
        line = re.sub("@[^ \n\t]*", "@ ", line)
        line = re.sub("#[^ \n\t]*", "# ", line)
        line = re.sub("http(.?)://[^ ]*", "url ", line)
        return line

    def collate_raw_fn(self, batch):
        """Collate into (cleaned raw texts, label tensor)."""
        sents = [self.clear_line(item[0]) for item in batch]
        labels = [item[1] for item in batch]
        return sents, torch.tensor(labels)

    def collate_fn(self, batch):
        """Collate into (token lists, label tensor, per-item token counts)."""
        words = [self.text2words(item[0], self.seg) for item in batch]
        labels = [item[1] for item in batch]
        word_nums = [len(sent) for sent in words]
        return words, torch.tensor(labels), torch.tensor(word_nums)

class TwitterLoader(RumorLoader):
    """Loads PHEME rumour/non-rumour tweet threads from per-tweet json files.

    Each thread (keyed by its thread-directory name) accumulates parallel lists
    of sentences, timestamps, tweet ids and reply targets in self.data;
    dataclear() then sorts each thread by time, merges posts into text steps,
    and derives data_ID / data_len / data_y for Dataset-style consumption.
    """

    def __init__(self):
        super(TwitterLoader, self).__init__()
        self.files = []
        userdic = [':)', '[', ']', '。', ',', '，', '{', '}', '(', ')', '!', '！',
                   '~', '～', '"', ':', '+', '-', '$', '.', '?', '<', '>', '|', '=', '...']
        self.seg = pkuseg.pkuseg(user_dict=userdic)

    def transIrregularWord(self, line, seg=None):
        """Lowercase, mask mentions/hashtags/URLs, then tokenize a post.

        Returns '' for empty input; seg.cut tokens when a segmenter is
        supplied, else whitespace-split tokens.
        """
        if not line:
            return ''
        # BUG FIX: str.lower() is not in-place; the original discarded its
        # result. (Sentences from data_process are already lowercased, so this
        # mainly matters for other callers.)
        line = line.lower()
        line = re.sub("@[^ \n\t]*", " @ ", line)
        line = re.sub("#[^ \n\t]*", " # ", line)
        line = re.sub("http(.?)://[^ ]*", " url ", line)
        if seg is not None:
            return seg.cut(line)
        return line.split()

    def list_files(self, path):
        """Collect all .json files under `path` into self.files."""
        self.scan_dir(path)

    def scan_dir(self, dir_name):
        """Recursively walk `dir_name`, appending .json file paths to self.files."""
        for item in os.walk(dir_name):
            if len(item[2]) == 0:
                # no file in this dir
                pass
            else:
                for fname in item[2]:
                    tmp_path = os.path.join(item[0], fname)
                    if tmp_path.endswith(".json"):  # is a json file
                        self.files.append(tmp_path)
                    else:
                        print("Warning: non json format file exists in %s : %s" % (dir_name, tmp_path))

    def data_process(self, file_path, content=None):
        """Parse one tweet json (or pre-loaded `content` dict) into {thread_id: {...}}.

        NOTE(review): the path is split on '/', so this assumes POSIX-style
        paths of the form .../<event>/<label>/<thread_id>/<subdir>/<tweet>.json.
        """
        ret = {}
        ss = file_path.split("/")
        if content is not None:
            data = content
        else:
            # BUG FIX: close the file handle instead of leaking it.
            with open(file_path, mode="r", encoding="utf-8") as fr:
                data = json.load(fr)
        # 'created_at' example: 'Wed Jan 07 11:14:08 +0000 2015'
        ret[ss[-3]] = {
                        'topic_label': event_dics[ss[-5]],
                        'label': ss[-4],
                        'event': ss[-5],
                        'sentence': [data['text'].lower()],
                        'created_at': [str2timestamp(data['created_at'])],
                        'tweet_id': [data['id']],
                        "reply_to": [data['in_reply_to_status_id']]
                       }
        return ret

    def fetch(self, twitter_dict):
        """Merge a single-tweet dict into self.data, skipping duplicate tweet ids."""
        for key in twitter_dict.keys():  # use temporary data to organize the final whole data
            if key in self.data:
                if twitter_dict[key]['tweet_id'][0] in self.data[key]['tweet_id']:
                    pass  # sometimes, there are duplicated posts
                else:
                    self.data[key]['tweet_id'].append(twitter_dict[key]['tweet_id'][0])
                    self.data[key]['sentence'].append(twitter_dict[key]['sentence'][0])
                    self.data[key]['created_at'].append(twitter_dict[key]['created_at'][0])
                    self.data[key]['reply_to'].append(twitter_dict[key]['reply_to'][0])
            else:
                self.data[key] = twitter_dict[key]

    def read_json_files(self, files):
        """Parse every json file and fold it into self.data."""
        for file in tqdm(files):
            twitter_dict = self.data_process(file)  # read out the information from the json file as {dataID: {...}}
            self.fetch(twitter_dict)

    def read_json_caches(self, files, caches):
        """Like read_json_files, but uses pre-loaded json contents."""
        assert len(files) == len(caches)
        for file, content in tqdm(zip(files, caches)):
            twitter_dict = self.data_process(file, content)
            self.fetch(twitter_dict)

    def preprocess_files(self, cached_contents=None):
        """Ingest self.files, preferring cached contents when provided."""
        print("preprocessing files: ")
        if cached_contents is not None:
            self.read_json_caches(self.files, cached_contents)
            return
        self.read_json_files(self.files)

    def sort_by_timeline(self, key, temp_idxs):
        """Reorder one thread's parallel lists by the given index permutation."""
        self.data[key]['sentence'] = [self.data[key]['sentence'][idx] for idx in temp_idxs]
        self.data[key]['created_at'] = [self.data[key]['created_at'][idx] for idx in temp_idxs]
        self.data[key]['tweet_id'] = [self.data[key]['tweet_id'][idx] for idx in temp_idxs]
        self.data[key]['reply_to'] = [self.data[key]['reply_to'][idx] for idx in temp_idxs]

    def gather_posts(self, key, temp_idxs, post_fn):
        """Merge every `post_fn` consecutive posts into one tokenized text step."""
        id2idx = {t_id: idx for idx, t_id in enumerate(self.data[key]['tweet_id'])}
        id2idx[None] = -1
        self.data[key]['text'] = []
        ttext = ""
        for i in range(len(temp_idxs)):
            if i % post_fn == 0:  # merge the fixed number of texts in a time interval
                if len(ttext) > 0:  # if there is data already in ttext, output it as a new instance
                    words = self.transIrregularWord(ttext, self.seg)
                    self.data[key]['text'].append(words)
                ttext = self.data[key]['sentence'][i]
            else:
                ttext += " " + self.data[key]['sentence'][i]
        # keep the last one
        if len(ttext) > 0:
            # BUG FIX: the original tokenized the trailing chunk without the
            # segmenter, inconsistent with the chunks emitted inside the loop.
            words = self.transIrregularWord(ttext, self.seg)
            self.data[key]['text'].append(words)

    def dataclear(self, post_fn=1):
        """Sort threads by time, build text steps, then derive IDs, lengths and labels."""
        print("data clear:")
        for key in tqdm(self.data):
            temp_idxs = np.array(self.data[key]['created_at']).argsort().tolist()
            self.sort_by_timeline(key, temp_idxs)
            self.gather_posts(key, temp_idxs, post_fn)

        for key in self.data.keys():
            self.data_ID.append(key)
        self.data_ID = random.sample(self.data_ID, len(self.data_ID))  # shuffle the data id

        for i in range(len(self.data_ID)):  # pre-processing the extra information
            self.data_len.append(len(self.data[self.data_ID[i]]['text']))
            # One-hot label: [0,1] for rumours, [1,0] for non-rumours.
            if self.data[self.data_ID[i]]['label'] == "rumours":
                self.data_y.append([0.0, 1.0])
            else:
                self.data_y.append([1.0, 0.0])

    def load_data(self, data_path = "../pheme-rnr-dataset/"):
        """Scan a dataset directory, parse all json files, and finalize the data."""
        self.scan_dir(data_path)
        self.read_json_files(self.files)
        self.dataclear()

    def load_events_from_cache(self, event_list, cache_file):
        """Load the listed events from a pickled cache of parsed json contents."""
        with open(cache_file, 'rb') as fr:
            caches = pickle.load(fr)
            # rename the cached dict: keep only the basename of each key
            caches = {key.split('/')[-1] : caches[key] for key in caches}
        for event_path in event_list:
            event_name = event_path.split("/")[-1]
            self.read_json_caches(caches[event_name]['files'], caches[event_name]['data'])
        self.dataclear()

    def load_event_list(self, event_list, cached_pkl_file=None):
        """Load the listed event directories, using the pickle cache when available."""
        if cached_pkl_file is not None:
            if os.path.exists(cached_pkl_file):
                self.load_events_from_cache(event_list, cached_pkl_file)
                return

        for event_path in event_list:
            self.scan_dir(event_path)
        self.read_json_files(self.files)
        self.dataclear()

class TwitterSet(TwitterLoader):
    """Dataset view over TwitterLoader threads.

    Items are (lemmatized text sequence, sequence length, one-hot label,
    topic label); collate_raw_batch turns one-hot labels into class indices.
    """

    def __init__(self, batch_size=20):
        super(TwitterSet, self).__init__()
        self.batch_size = batch_size
        # -1 means "return the full sequence"; otherwise __getitem__ subsamples
        # each thread down to sample_len steps.
        self.sample_len = -1
        self.lemmatizer = WordNetLemmatizer()

    def lemma(self, word_tokens):
        """Lemmatize tokens with the WordNet POS their nltk tag maps to.

        J->adjective, V->verb, N->noun, R->adverb; other tags pass through.
        """
        tags = nltk.pos_tag(word_tokens)
        new_words = []
        for pair in tags:
            if pair[1].startswith('J'):
                new_words.append(self.lemmatizer.lemmatize(pair[0], 'a'))
            elif pair[1].startswith('V'):
                new_words.append(self.lemmatizer.lemmatize(pair[0], 'v'))
            elif pair[1].startswith('N'):
                new_words.append(self.lemmatizer.lemmatize(pair[0], 'n'))
            elif pair[1].startswith('R'):
                new_words.append(self.lemmatizer.lemmatize(pair[0], 'r'))
            else:
                new_words.append(pair[0])
        return new_words

    def collate_raw_batch(self, batch):
        """Collate (seq, len, one-hot label, topic) items; labels become class indices."""
        seqs = [item[0] for item in batch]
        lens = [item[1] for item in batch]
        labels = [item[2] for item in batch]
        topic_labels = [item[3] for item in batch]
        return seqs, torch.tensor(lens), torch.tensor(labels).argmax(dim=1), torch.tensor(topic_labels)

    def __len__(self):
        return len(self.data_ID)

    def __getitem__(self, index):
        if self.sample_len != -1:
            # Subsample: always keep the source post, then draw sample_len-1
            # replies without replacement, preserved in temporal order.
            tmp_seq = [" ".join(self.lemma(self.data[self.data_ID[index]]['text'][j])) for j in range(self.data_len[index])]
            new_len = min(self.sample_len, len(tmp_seq))
            seq = tmp_seq[0:1] + [tmp_seq[idx] for idx in np.sort(random.sample(list(range(1, len(tmp_seq))), new_len-1))]
            return seq, len(seq), self.data_y[index], self.data[self.data_ID[index]]['topic_label']
        else:
            seq = [" ".join(self.lemma(self.data[self.data_ID[index]]['text'][j])) for j in range(self.data_len[index])]
            return seq, self.data_len[index], self.data_y[index], self.data[self.data_ID[index]]['topic_label']

class CAMI_TwitterSet(TwitterSet):
    """TwitterSet variant that compresses each thread to roughly `seq_len`
    steps by merging every `gap` consecutive posts (CAMI-style input)."""

    def __init__(self, batch_size=20, seq_len=20):
        super(CAMI_TwitterSet, self).__init__(batch_size)
        self.fixed_seq_len = seq_len

    def __getitem__(self, index):
        # Merge consecutive posts so the output has ~fixed_seq_len steps.
        gap = self.data_len[index]//self.fixed_seq_len
        gap = 1 if gap == 0 else gap
        seq = []
        for j in range(0, self.data_len[index], gap):
            s = ""
            # BUG FIX: the original indexed ['text'][j] in this inner loop,
            # repeating the same post `gap` times instead of merging posts
            # j..j+gap-1. The min() bound keeps the final chunk in range.
            for i in range(j, min(j + gap, self.data_len[index])):
                s = s + " ".join(self.lemma(self.data[self.data_ID[index]]['text'][i]))
            seq.append(s)
        return seq, len(seq), self.data_y[index], self.data[self.data_ID[index]]['topic_label']

class GraphTwitterSet(TwitterSet):
    """TwitterSet that also yields a bidirected reply graph (dgl) per thread."""

    def __init__(self, batch_size=20):
        super(GraphTwitterSet, self).__init__(batch_size=batch_size)

    def collate_raw_batch(self, batch):
        """Collate into (seqs, graphs, class-index tensor, topic tensor)."""
        seqs = [item[0] for item in batch]
        graphs = [item[1] for item in batch]
        labels = [item[2] for item in batch]
        topic_labels = [item[3] for item in batch]
        return seqs, graphs, torch.tensor(labels).argmax(dim=1), torch.tensor(topic_labels)

    def __getitem__(self, index):
        d_ID = self.data_ID[index]
        # Map tweet id -> node index; node i (i >= 1) points to its reply target.
        tIds_dic = {ID: idx for idx, ID in enumerate(self.data[d_ID]["tweet_id"])}
        src = np.arange(0, len(self.data[d_ID]["tweet_id"]), 1)
        # NOTE(review): assumes every reply_to id after the root exists in
        # tweet_id — a KeyError here means a reply references a missing tweet.
        dst = np.array([tIds_dic[ID] for ID in self.data[d_ID]["reply_to"][1:]])
        g = dgl.graph((src[1:], dst), num_nodes=len(src))
        # NOTE(review): the `readonly` kwarg was removed in newer dgl releases —
        # confirm the pinned dgl version still accepts it.
        g = dgl.to_bidirected(g, readonly=False)
        seq = [" ".join(self.lemma(self.data[self.data_ID[index]]['text'][j])) for j in range(self.data_len[index])]
        return seq, g, self.data_y[index], self.data[self.data_ID[index]]['topic_label']

class TreeTwitterSet(TwitterSet):
    """TwitterSet that yields a reply Tree per thread (built lazily on first access)."""

    def __init__(self, batch_size=20):
        super(TreeTwitterSet, self).__init__(batch_size=batch_size)

    def init_trees(self):
        """Deduplicate tweet ids per thread and build a Tree of reply edges.

        Duplicate posts are removed from every parallel list (data_len shrinks
        accordingly); replies whose target id is unknown attach to the root.
        """
        self.data_trees = []
        for index, d_ID in enumerate(tqdm(self.data_ID)):
            tIds_dic = {}
            dup_cnt = 0
            dup_idxs = []
            for idx, ID in enumerate(self.data[d_ID]["tweet_id"][:self.data_len[index]]):
                if ID in tIds_dic:
                    self.data_len[index] -= 1
                    dup_cnt += 1
                    dup_idxs.append(idx)
                else:
                    tIds_dic[ID] = idx - dup_cnt
            # pop(idx - i): each earlier pop shifts later positions left by one.
            for i, idx in enumerate(dup_idxs):
                self.data[d_ID]["tweet_id"].pop(idx - i)
                self.data[d_ID]["reply_to"].pop(idx - i)
                self.data[d_ID]["text"].pop(idx - i)
                self.data[d_ID]["sentence"].pop(idx - i)
                self.data[d_ID]["created_at"].pop(idx - i)
                # NOTE(review): 'reply_idx' is not created by TwitterLoader.data_process —
                # confirm it is populated elsewhere before init_trees runs.
                self.data[d_ID]["reply_idx"].pop(idx - i)
            # Edge (parent, child): child src_idx+1 attaches to its reply target,
            # or to the root (node 0) when the target id is unknown.
            edges = [(tIds_dic[dst_ID], src_idx+1) if dst_ID in tIds_dic else (0, src_idx+1)
                     for src_idx, dst_ID in enumerate(self.data[d_ID]["reply_to"][1:self.data_len[index]])]
            tree = Tree(root_idx=0)
            tree.Construct(edges)
            assert tree.size() == self.data_len[index]
            self.data_trees.append(tree)

    def collate_raw_batch(self, batch):
        """Collate into (seqs, trees, class-index tensor, topic tensor)."""
        seqs = [item[0] for item in batch]
        trees = [item[1] for item in batch]
        labels = [item[2] for item in batch]
        topic_labels = [item[3] for item in batch]
        return seqs, trees, torch.tensor(labels).argmax(dim=1), torch.tensor(topic_labels)

    def __getitem__(self, index):
        """Return (lemmatized texts, reply Tree, one-hot label, topic label)."""
        d_ID = self.data_ID[index]
        # Build all trees the first time any item is requested.
        if not getattr(self, "data_trees", False):
            self.init_trees()
        tree = self.data_trees[index]
        seq = [" ".join(self.lemma(self.data[d_ID]['text'][j])) for j in range(self.data_len[index])]
        assert len(seq) == (tree.size())
        return seq, tree, self.data_y[index], self.data[self.data_ID[index]]['topic_label']

class DGLTDTree(TreeTwitterSet):
    """TreeTwitterSet variant whose per-thread tree is a dgl graph instead of a Tree."""

    def __init__(self, batch_size=20):
        super(DGLTDTree, self).__init__(batch_size=batch_size)

    def init_trees(self):
        """Deduplicate tweet ids and build one dgl reply graph per thread.

        NOTE(review): unlike TreeTwitterSet.init_trees, the dedup scan covers
        the full tweet_id list rather than the first data_len entries —
        confirm this difference is intentional.
        """
        self.data_trees = []
        for index, d_ID in enumerate(tqdm(self.data_ID)):
            tIds_dic = {}
            dup_cnt = 0
            dup_idxs = []
            for idx, ID in enumerate(self.data[d_ID]["tweet_id"]):
                if ID in tIds_dic:
                    self.data_len[index] -= 1
                    dup_cnt += 1
                    dup_idxs.append(idx)
                else:
                    tIds_dic[ID] = idx - dup_cnt
            # pop(idx - i): each earlier pop shifts later positions left by one.
            for i, idx in enumerate(dup_idxs):
                self.data[d_ID]["tweet_id"].pop(idx - i)
                self.data[d_ID]["reply_to"].pop(idx - i)
                self.data[d_ID]["text"].pop(idx - i)
                self.data[d_ID]["sentence"].pop(idx - i)
                self.data[d_ID]["created_at"].pop(idx - i)
                # NOTE(review): 'reply_idx' is not created by TwitterLoader.data_process —
                # confirm it is populated elsewhere before this runs.
                self.data[d_ID]["reply_idx"].pop(idx - i)
            # Edges run reply -> reply-target; unknown targets attach to the root (0).
            src_tensor = torch.tensor([tIds_dic[src_ID]
                                        for src_ID in self.data[d_ID]["tweet_id"][1:]]
                                      , dtype=torch.int64
                                     )
            dst_tensor = torch.tensor([tIds_dic[dst_ID] if dst_ID in tIds_dic else 0
                                                for dst_ID in self.data[d_ID]["reply_to"][1:]]
                                      , dtype=torch.int64
                                     )
            tree = dgl.graph((src_tensor, dst_tensor),
                                num_nodes=len(self.data[d_ID]["tweet_id"]),
                                idtype=torch.int32
                                # device="cuda:0" if torch.cuda.is_available() else "cpu"
                            )#.add_self_loop()
            self.data_trees.append(tree)

    def __getitem__(self, index):
        """Return (lemmatized texts, dgl graph, one-hot label, topic label)."""
        d_ID = self.data_ID[index]
        # Build all graphs the first time any item is requested.
        if not getattr(self, "data_trees", False):
            self.init_trees()
        tree = self.data_trees[index]
        seq = [" ".join(self.lemma(self.data[d_ID]['text'][j])) for j in range(self.data_len[index])]
        return seq, tree, self.data_y[index], self.data[self.data_ID[index]]['topic_label']



class BiGCNTwitterSet(TwitterSet):
    """TwitterSet for BiGCN: yields texts plus top-down and bottom-up reply graphs.

    Graphs and lemmatized texts are cached lazily on the instance so repeated
    __getitem__ calls do not rebuild them.
    """

    def __init__(self, batch_size=20):
        super(BiGCNTwitterSet, self).__init__(batch_size=batch_size)
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    def collate_raw_batch(self, batch):
        """Collate into (seqs, TD graphs, BU graphs, class-index tensor, topic tensor)."""
        seqs = [item[0] for item in batch]
        TD_graphs = [item[1] for item in batch]
        BU_graphs = [item[2] for item in batch]
        labels = [item[3] for item in batch]
        topic_labels = [item[4] for item in batch]
        return seqs, TD_graphs, BU_graphs, torch.tensor(labels).argmax(dim=1), torch.tensor(topic_labels)

    def __getitem__(self, index):
        """Return (texts, TD graph with self-loops, BU graph with self-loops,
        one-hot label, topic label), building the per-thread caches on demand."""
        d_ID = self.data_ID[index]
        # Lazily created per-instance caches.
        if not hasattr(self, "g_TD"):
            self.g_TD = {}
        if not hasattr(self, "g_BU"):
            self.g_BU = {}
        if not hasattr(self, "lemma_text"):
            self.lemma_text = {}

        if d_ID in self.g_TD and d_ID in self.g_BU:
            g_TD, g_BU = self.g_TD[d_ID], self.g_BU[d_ID]
        else:
            # Deduplicate tweet ids, shrinking data_len accordingly.
            tIds_dic = {}
            dup_cnt = 0
            dup_idxs = []
            for idx, ID in enumerate(self.data[d_ID]["tweet_id"][:self.data_len[index]]):
                if ID in tIds_dic:
                    self.data_len[index] -= 1
                    dup_cnt += 1
                    dup_idxs.append(idx)
                else:
                    tIds_dic[ID] = idx - dup_cnt
            # BUG FIX: each pop shifts later duplicate positions left by one, so
            # offset by the number already removed (mirrors TreeTwitterSet.init_trees);
            # the original popped at stale indices when a thread had several duplicates.
            for i, idx in enumerate(dup_idxs):
                self.data[d_ID]["tweet_id"].pop(idx - i)
                self.data[d_ID]["reply_to"].pop(idx - i)
                self.data[d_ID]["text"].pop(idx - i)
                self.data[d_ID]["sentence"].pop(idx - i)
                self.data[d_ID]["created_at"].pop(idx - i)
                # NOTE(review): 'reply_idx' is not created by TwitterLoader.data_process —
                # confirm it is populated elsewhere before this runs.
                self.data[d_ID]["reply_idx"].pop(idx - i)
            # Edges run reply -> reply-target; replies to unknown ids are dropped.
            edges = [(src_idx, tIds_dic[dst_ID])
                        for src_idx, dst_ID in enumerate(self.data[d_ID]["reply_to"][:self.data_len[index]]) \
                            if dst_ID in tIds_dic]
            src = np.array([item[0] for item in edges])
            dst = np.array([item[1] for item in edges])
            g_TD = dgl.graph((src, dst), num_nodes=self.data_len[index])
            g_BU = dgl.graph((dst, src), num_nodes=self.data_len[index])
            self.g_TD[d_ID] = g_TD
            self.g_BU[d_ID] = g_BU

        if index in self.lemma_text:
            seq = self.lemma_text[index]
        else:
            seq = [" ".join(self.lemma(self.data[self.data_ID[index]]['text'][j])) for j in range(self.data_len[index])]
            self.lemma_text[index] = seq

        # BUG FIX: the second clause originally re-checked g_TD; verify both graphs.
        assert len(seq) == g_TD.num_nodes() and len(seq) == g_BU.num_nodes()
        return (seq, dgl.add_self_loop(g_TD), \
               dgl.add_self_loop(g_BU), \
               self.data_y[index], \
               self.data[self.data_ID[index]]['topic_label'])

class DA_TwitterSet(TwitterSet):
    """TwitterSet with data-augmentation operations on thread texts.

    Augmentations: tf-idf guided word replacement/deletion, embedding-based
    word addition, random/early post insertion, and post-order exchange.
    NOTE(review): self.w2v (a gensim-style keyed-vector object) is used
    throughout but never assigned in this file — confirm the loader attaches
    it, presumably from w2v_file.
    """

    def __init__(self, w2v_file="./word2vec.txt"):
        # BUG FIX: TwitterSet.__init__ accepts only batch_size, so forwarding
        # w2v_file raised TypeError; keep the path on the instance instead.
        super(DA_TwitterSet, self).__init__()
        self.w2v_file = w2v_file
        self.tfidf = Text2TFIDF()

    def WordReplace(self, event_ID, replace_cnt=3):
        """Replace the `replace_cnt` highest tf-idf words of each post with
        their nearest word2vec neighbour (punctuation/placeholder tokens skipped)."""
        texts = [" ".join(text) for text in self.data[event_ID]['text']]
        arrs, words = self.tfidf.text2tfidf(texts)
        import_ids = arrs.argsort(axis=1)[:, -replace_cnt:]
        import_words = [[words[id] for id in ids] for ids in import_ids]
        for idx, words in enumerate(import_words):
            for word in words:
                if word in ['url', '[', ']', '。', ',', '，', '{', '}', '(', ')', '!', '！', '~', '～', '"', ':', '+', '-', '$', '.', '?', '<', '>', '|', '=']:
                    pass
                elif word in self.w2v:
                    texts[idx] = texts[idx].replace(word, self.w2v.most_similar(word)[0][0])
                else:
                    pass
        self.data[event_ID]['text'] = [text.split() for text in texts]

    def WordAdd(self, event_ID):
        """Append to each post the word most similar to the negated mean of its vectors."""
        for idx, wl in enumerate(self.data[event_ID]['text']):
            sent_vec = np.stack([self.w2v[w] for w in wl if w in self.w2v]).mean(axis=0)
            new_word = self.w2v.similar_by_vector(-1*sent_vec)[0][0]
            self.data[event_ID]['text'][idx].append(new_word)

    def WordDelete(self, event_ID, del_cnt=1):
        """Delete the `del_cnt` lowest tf-idf words from each post."""
        texts = [" ".join(text) for text in self.data[event_ID]['text']]
        arrs, words = self.tfidf.text2tfidf(texts)
        unimport_ids = arrs.argsort(axis=1)[:, :del_cnt]
        unimport_words = [[words[id] for id in ids] for ids in unimport_ids]
        # BUG FIX: removed a leftover pdb.set_trace() debugger breakpoint that
        # halted every call to this method.
        for idx, words in enumerate(unimport_words):
            for word in words:
                texts[idx] = texts[idx].replace(word, "")
        self.data[event_ID]['text'] = [text.split() for text in texts]

    def RandomlyInsert(self, event_ID, insert_num=2):
        """Insert `insert_num` random posts (from random threads) at random positions."""
        for i in range(insert_num):
            seq_len = len(self.data[event_ID]['text'])
            content_ID = random.sample(self.data_ID, 1)[0]
            content = random.sample(self.data[content_ID]['text'][1:], 1)[0]
            if len(content) != 0:
                insert_idx = random.sample(list(range(seq_len)), 1)[0]
                self.data[event_ID]['text'].insert(insert_idx + 1, content)

    def EarlyDelete(self, event_ID, delete_num=1):
        """Placeholder: intended to drop early posts from a thread."""
        pass

    def EarlyInsert(self, event_ID, insert_num=1, sents=None):
        """Insert `insert_num` posts right after the source tweet, drawn from
        `sents` when given, otherwise from random other threads."""
        for i in range(insert_num):
            if sents is not None:
                content = random.sample(sents, 1)[0]
            else:
                content = []
                # Retry until a non-empty reply is drawn; threads with no
                # replies raise ValueError from random.sample and are skipped.
                while len(content) == 0:
                    try:
                        content_ID = random.sample(self.data_ID, 1)[0]
                        content = random.sample(self.data[content_ID]['text'][1:], 1)[0]
                    except ValueError:
                        pass
            self.data[event_ID]['text'].insert(1, content)

    def Exchange(self, event_ID, ratio=0.5):
        """Randomly permute `ratio` of the non-source posts among themselves."""
        texts = [text for text in self.data[event_ID]['text']]
        samples_cnt = int(len(texts)*ratio)
        sample_idxs = random.sample(list(range(1, len(texts), 1)), samples_cnt)
        new_idxs = random.sample(sample_idxs, len(sample_idxs))
        for i in range(len(sample_idxs)):
            self.data[event_ID]['text'][sample_idxs[i]] = texts[new_idxs[i]]
        return

class RLSet(TwitterSet):
    """TwitterSet adapted for DQN-style early rumour detection.

    Per-thread, per-step entries 'action', 'qval', 'reward' and 'h_in' are
    read and written by the batch helpers below.
    NOTE(review): load_data_fast and self.w2v are not defined in this file —
    confirm the base loader provides them before training.
    """

    def __init__(self, cached_data_prefix="../data/twitter_dev"):
        super(RLSet, self).__init__()
        self.load_data_fast(cached_data_prefix)
        print("len: ", len(self.data_ID))
        self.idxs = list(range(len(self.data_ID)))

        self.embedding_dim = 300
        self.hidden_dim = 256
        # Longest post (in tokens) across the corpus; sizes the input buffer.
        self.max_sent_len = max([max([len(words) for words in self.data[ID]['text']]) for ID in self.data_ID])
        self.reward_counter = 0
        self.reward_rate = 0.9
        self.discount_rate = 0.9
        # Epsilon for epsilon-greedy action selection in GenerateAct.
        self.action_sample_ratio = 0.1

    def GenerateAct(self, QVals):
        """Epsilon-greedy action selection: one-hot actions from the Q-values,
        replaced by a random action with probability action_sample_ratio."""
        isStop = torch.zeros(QVals.shape)
        rst_idxs = QVals.argmax(dim=1)
        for i in range(len(QVals)):
            if random.random() < self.action_sample_ratio:
                act = random.randint(0, 1)
                isStop[i][act] = 1
            else:
                isStop[i][rst_idxs[i]] = 1
        return isStop

    def update_Qval(self, batch_idxs, batch_seq_ids, QVals, h_states):
        """Pick actions for the batch, compute rewards, and write action/qval/
        reward (plus the next step's hidden state) back into the thread data."""
        Acts = self.GenerateAct(QVals)
        rw, _ = self.get_reward(Acts.argmax(dim=1),
                                batch_idxs,
                                batch_seq_ids,
                                cuda=False)
        for i in range(self.batch_size):
            self.data[self.data_ID[batch_idxs[i]]]['action'][batch_seq_ids[i]] = Acts[i]
            self.data[self.data_ID[batch_idxs[i]]]['qval'][batch_seq_ids[i]] = QVals[i]
            self.data[self.data_ID[batch_idxs[i]]]['reward'][batch_seq_ids[i]] = rw[i]
            # Propagate the hidden state forward unless this was the last step.
            if batch_seq_ids[i] + 1 < self.data_len[batch_idxs[i]]:
                self.data[self.data_ID[batch_idxs[i]]]['h_in'][batch_seq_ids[i] + 1] = h_states[i, :]

    def get_rl_batch(self):
        """Sample random (thread, step) pairs and return embedded inputs,
        Bellman-target Q values, stored actions and hidden states."""
        batch_idxs = random.sample(self.idxs, self.batch_size)
        batch_seq_ids = [random.randint(0, self.data_len[idx] - 1) for idx in batch_idxs]
        Acts = torch.zeros([self.batch_size, 2])
        input_x = torch.zeros([self.batch_size, self.max_sent_len, self.embedding_dim], dtype=torch.float32)
        QVals = torch.zeros([self.batch_size], dtype=torch.float32)
        h_states = torch.zeros([self.batch_size, self.hidden_dim])

        for i in range(self.batch_size):
            word_list = self.data[self.data_ID[batch_idxs[i]]]['text'][batch_seq_ids[i]]
            wordvecs = self.w2v.wordlist2wordvecs(word_list)
            input_x[i, :len(word_list), :] = torch.tensor(np.stack(wordvecs))
            # Bellman target: terminal steps use the stored reward alone,
            # otherwise reward + discounted max Q of the next step.
            if batch_seq_ids[i] == self.data_len[batch_idxs[i]] - 1:
                QVals[i] = self.data[self.data_ID[batch_idxs[i]]]['reward'][batch_seq_ids[i]]
            else:
                QVals[i] = self.data[self.data_ID[batch_idxs[i]]]['reward'][batch_seq_ids[i]] + \
                           self.discount_rate * max(
                    self.data[self.data_ID[batch_idxs[i]]]['qval'][batch_seq_ids[i] + 1])
            Acts[i] = self.data[self.data_ID[batch_idxs[i]]]['action'][batch_seq_ids[i]]
            h_states[i, :] = self.data[self.data_ID[batch_idxs[i]]]['h_in'][batch_seq_ids[i]]
        return input_x, QVals, Acts, h_states, batch_idxs, batch_seq_ids

    def get_raw_rl_batch(self):
        """Like get_rl_batch, but returns raw joined sentences instead of
        pre-embedded word vectors."""
        batch_idxs = random.sample(self.idxs, self.batch_size)
        batch_seq_ids = [random.randint(0, self.data_len[idx] - 1) for idx in batch_idxs]
        Acts = torch.zeros([self.batch_size, 2])
        # input_x = torch.zeros([self.batch_size, self.max_sent_len, self.embedding_dim], dtype=torch.float32)
        input_sents = []
        QVals = torch.zeros([self.batch_size], dtype=torch.float32)
        h_states = torch.zeros([self.batch_size, self.hidden_dim])

        for i in range(self.batch_size):
            input_sents.append( " ".join(self.data[self.data_ID[batch_idxs[i]]]['text'][batch_seq_ids[i]]))
            # Same Bellman target as get_rl_batch.
            if batch_seq_ids[i] == self.data_len[batch_idxs[i]] - 1:
                QVals[i] = self.data[self.data_ID[batch_idxs[i]]]['reward'][batch_seq_ids[i]]
            else:
                QVals[i] = self.data[self.data_ID[batch_idxs[i]]]['reward'][batch_seq_ids[i]] + \
                           self.discount_rate * max(
                    self.data[self.data_ID[batch_idxs[i]]]['qval'][batch_seq_ids[i] + 1])
            Acts[i] = self.data[self.data_ID[batch_idxs[i]]]['action'][batch_seq_ids[i]]
            h_states[i, :] = self.data[self.data_ID[batch_idxs[i]]]['h_in'][batch_seq_ids[i]]
        return input_sents, QVals, Acts, h_states, batch_idxs, batch_seq_ids

    def get_reward(self, isStop, batch_idxs, batch_seq_ids, cuda=False):
        """Shaped rewards for stop/continue actions.

        Stopping on a true rumour earns a reward that grows with the running
        count of correct stops; stopping on a non-rumour costs -100; continuing
        (or running out of steps) costs -0.1. The second return value is the
        Q-value target (reward, plus discounted next-step max Q when continuing).
        NOTE(review): `cuda` is accepted but unused here.
        """
        reward = torch.zeros([len(isStop)], dtype=torch.float32)
        Q_Val = torch.zeros([len(isStop)], dtype=torch.float32)
        for i in range(len(isStop)):
            if isStop[i] == 1:
                if np.argmax(self.data_y[batch_idxs[i]]) == 1:
                    self.reward_counter += 1  # more correct predictions, more reward
                    r = 1 + self.reward_rate * math.log(1 + self.reward_counter)
                    reward[i] = r
                else:
                    reward[i] = -100
                Q_Val[i] = reward[i]  # Agent will stop
            elif batch_seq_ids[i] == self.data_len[batch_idxs[i]] - 1:
                reward[i] = - 0.1
                Q_Val[i] = reward[i]  # Agent will stop
            else:
                reward[i] = - 0.1
                Q_Val[i] = reward[i] + 0.99 * max(
                    self.data[self.data_ID[batch_idxs[i]]]['qval'][batch_seq_ids[i] + 1])
        return reward, Q_Val

    def update_length(self, RLModel):
        """Placeholder: intended to adapt usable sequence lengths from the RL model."""
        pass