import csv
import pickle
import json

import dgl
import torch
import networkx as nx
import pandas as pd
import re
import pickle as pkl
from torch.utils.data import Dataset, DataLoader


class LocationTweetDataset(Dataset):
    """Per-user tweets and location labels for one split of a geolocation dataset.

    Each item is ``(user_index, tweets, label)`` where ``tweets`` is a list of
    token-id sequences for that user.  For the ``train`` split a user
    interaction graph is additionally built from @-mentions in the raw text and
    moved to CUDA together with a per-user tf-idf feature matrix.
    """

    def __init__(self, root_path, dataset, suffix, min_len=0, celebrity_threshold=5):
        """
        Args:
            root_path: directory containing the ``<dataset>/`` sub-folder.
            dataset: dataset name, e.g. ``'cmu'``.
            suffix: split name: ``'train'``, ``'valid'`` or ``'test'``.
            min_len: minimum tweet length (currently not enforced — see
                ``__getitem__``).
            celebrity_threshold: mentioned accounts whose degree exceeds this
                are dropped from the graph as celebrities.
        """
        super(LocationTweetDataset, self).__init__()
        self.root_path = root_path
        self.tweets = self.load_tweets(dataset, suffix)
        self.labels = self.load_labels(dataset, suffix)
        if suffix == 'train':
            self.graph = self.load_graph(dataset, celebrity_threshold=celebrity_threshold)
            self.graph_feat = self.load_graph_feat(dataset)
            assert self.graph.number_of_nodes() == self.graph_feat.shape[0], \
                "The number of nodes in graph mismatch the user number of graph feature, but get {} and {}". \
                    format(self.graph.number_of_nodes(), self.graph_feat.shape[0])

            # Self loops let every node see its own features during message
            # passing in DGL graph convolutions.
            self.graph = dgl.from_networkx(self.graph).to('cuda')
            self.graph = dgl.add_self_loop(self.graph)
            # `/ 1.0` promotes a possibly-integer matrix to float before the
            # explicit cast to float32.
            self.graph_feat = torch.from_numpy(self.graph_feat / 1.0).to(torch.float32).cuda()
        self.min_len = min_len

    def load_tweets(self, dataset, suffix):
        """Load the pickled {user: tweets} embedding file for one split.

        Returns the tweet lists in the file's insertion order.
        NOTE: pickle is only safe on trusted, project-generated files.
        """
        with open('{}/{}/text_emb.{}.{}_nostop.pkl'.format(self.root_path, dataset, dataset, suffix), 'rb') as f:
            tweets = pickle.load(f)
        return list(tweets.values())

    def _read_user_info(self, path):
        """Read one tab-separated user_info CSV with columns (user, lat, lon, text)."""
        # NOTE(review): `error_bad_lines` was deprecated in pandas 1.3 and
        # removed in 2.0; switch to `on_bad_lines='skip'` when upgrading.
        return pd.read_csv(path,
                           sep='\t',
                           encoding='latin1',
                           names=['user', 'lat', 'lon', 'text'],
                           quoting=csv.QUOTE_NONE, error_bad_lines=False)

    def load_graph(self, dataset, celebrity_threshold=5):
        """Build an undirected user graph from @-mentions across all splits.

        Users from train/dev/test become the first ``len(df_user)`` nodes.
        Mentioned accounts and hashtags get temporary auxiliary ids; after
        celebrity pruning, each remaining mentioned account is collapsed into
        edges between the users that mentioned it, so the returned graph
        contains only the original users.
        """
        train_file = '{}/{}/user_info/user_info.train.csv'.format(self.root_path, dataset)
        valid_file = '{}/{}/user_info/user_info.dev.csv'.format(self.root_path, dataset)
        test_file = '{}/{}/user_info/user_info.test.csv'.format(self.root_path, dataset)

        df_train = self._read_user_info(train_file)
        df_valid = self._read_user_info(valid_file)
        df_test = self._read_user_info(test_file)

        df_tmp = pd.merge(df_train, df_valid, how='outer')
        df_user = pd.merge(df_tmp, df_test, how='outer')

        print("Constructing graph from the tweets...")

        g = nx.Graph()
        origin_nodes = df_user['user'].tolist()
        node_id = {node: idx for idx, node in enumerate(origin_nodes)}
        num_origin = len(origin_nodes)

        g.add_nodes_from(node_id.values())
        # @mention / #hashtag tokens not preceded by a word-like character.
        pattern1 = re.compile('(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@([A-Za-z]+[A-Za-z0-9_]+)')
        pattern2 = re.compile('(?<=^|(?<=[^a-zA-Z0-9-_\\.]))#([A-Za-z]+[A-Za-z0-9_]+)')

        for i in range(len(df_user)):
            user = df_user.user[i]
            user_id = node_id[user]
            mentions = [m.lower() for m in pattern1.findall(df_user.text[i])]  # accounts mentioned by user i
            topics = [t.lower() for t in pattern2.findall(df_user.text[i])]

            # TODO: maybe record mention counts to use as edge weights.

            idmentions = set()  # node ids for the mentioned accounts
            for m in mentions:
                if m in node_id:
                    idmentions.add(node_id[m])
                else:
                    # assign a fresh id to a mentioned account not yet seen
                    new_id = len(node_id)
                    node_id[m] = new_id
                    idmentions.add(new_id)
            if idmentions:
                g.add_nodes_from(idmentions)

            topic_mentions = set()
            for t in topics:
                # BUG FIX: membership must be tested against node_id, not
                # topic_mentions (a set of ints) — the old check was always
                # false, so every hashtag occurrence got a brand-new id and
                # the previous one leaked as an orphan node.
                if t in node_id:
                    topic_mentions.add(node_id[t])
                else:
                    new_id = len(node_id)
                    node_id[t] = new_id
                    topic_mentions.add(new_id)
            if topic_mentions:
                g.add_nodes_from(topic_mentions)

            # connect the user to every account they mention
            for mention_id in idmentions:
                if not g.has_edge(user_id, mention_id):
                    g.add_edge(user_id, mention_id)

        # Drop auxiliary nodes that are celebrities (too many mentioners) or
        # dead ends (exactly one mentioner: they contribute no user-user edge).
        celebrities = []
        for i in range(num_origin, len(node_id)):
            deg = g.degree(i)
            if deg == 1 or deg > celebrity_threshold:
                celebrities.append(i)
        g.remove_nodes_from(celebrities)
        print('Removed {} celebrity nodes with degree higher than {}'.format(len(celebrities), celebrity_threshold))

        # Collapse every remaining auxiliary node: fully connect the original
        # users that share it, then remove the auxiliary node itself.
        for node in set(g.nodes()):
            if node < num_origin:
                continue
            # BUG FIX: neighbors are integer node ids; the old code compared
            # them against the list of user *names*, so the filter was always
            # empty and no user-user edge was ever created.
            nbr_users = [nbr for nbr in g.neighbors(node) if nbr < num_origin]
            for node1 in nbr_users:
                for node2 in nbr_users:
                    if node1 != node2 and not g.has_edge(node1, node2):
                        g.add_edge(node1, node2)
            g.remove_node(node)

        print("Graph's nodes num: %d" % len(g.nodes))
        print("Original nodes num: %d" % num_origin)
        print("-" * 20)
        return g

    def load_graph_feat(self, dataset):
        """Load the pickled per-user tf-idf feature matrix (rows align with graph nodes)."""
        graph_feat_path = '{}/{}/cmuuser_tfidf_mat.pkl'.format(self.root_path, dataset)
        with open(graph_feat_path, 'rb') as fp:
            graph_feat = pkl.load(fp)
        return graph_feat

    def load_labels(self, dataset, suffix):
        """Load class labels for one split.

        ``user_loc.csv`` holds one integer label per user, ordered
        train → valid → test; the slice for ``suffix`` is returned.
        An unknown suffix falls through and returns the full list.
        """
        loc_list = []
        with open('{}/{}/user_loc.csv'.format(self.root_path, dataset), 'r', encoding='utf-8') as cf:
            lines = csv.reader(cf)
            for line in lines:
                loc_list.append(int(line[0]))
        train_size = len(self.load_tweets(dataset, 'train'))
        valid_size = len(self.load_tweets(dataset, 'valid'))
        test_size = len(self.load_tweets(dataset, 'test'))
        assert train_size + valid_size + test_size == len(loc_list)

        if suffix == 'train':
            loc_list = loc_list[0:train_size]
        elif suffix == 'valid':
            loc_list = loc_list[train_size:train_size + valid_size]
        elif suffix == 'test':
            loc_list = loc_list[train_size + valid_size:train_size + valid_size + test_size]

        return loc_list

    def __add__(self, other):
        # Not implemented: concatenation of two splits is unsupported.
        ...

    def __getitem__(self, item):
        """Return ``(user_index, tweets, label)`` for one user.

        NOTE(review): the original loop over the user's tweets was a no-op
        (its body was only ``continue``) and has been removed; filtering
        tweets shorter than ``self.min_len`` is still TODO.
        """
        return item, self.tweets[item], self.labels[item]

    def __len__(self):
        """Number of users in this split."""
        return len(self.tweets)


def load_geoloc(root_path, dataset):
    """Load the per-class median latitude/longitude lookup tables.

    Returns:
        A pair ``(classLatMedian, classLonMedian)`` — dicts read from the
        dataset's ``classLatMedian.json`` / ``classLonMedian.json`` files.
    """
    def _read_json(name):
        # Both tables live next to each other under the dataset folder.
        with open('{}/{}/{}.json'.format(root_path, dataset, name), 'r', encoding='utf-8') as fp:
            return json.load(fp)

    return _read_json('classLatMedian'), _read_json('classLonMedian')


def tweets_collate_fn(data):
    """Collate ``(user_idx, tweets, label)`` samples into padded batch tensors.

    Args:
        data: list of ``(user_idx, tweets, label)`` where ``tweets`` is a
            list of token-id sequences.

    Returns:
        user_idx: LongTensor with one user index per sample.
        all_tweet: LongTensor of shape (total_tweets, max_len), zero-padded
            on the right; rows keep the samples' original order.
        tweets_num_list: list with each user's tweet count (for regrouping).
        labels: list of class labels, unchanged.
    """
    user_idx_list = [user_idx for user_idx, _, _ in data]
    labels = [label for _, _, label in data]
    tweets_num_list = [len(tweets) for _, tweets, _ in data]

    # Flatten all users' tweets into one list; padding width is the longest.
    flat_tweets = [tweet for _, tweets, _ in data for tweet in tweets]
    max_tweet_len = max(len(tweet) for tweet in flat_tweets)

    all_tweet = torch.zeros(len(flat_tweets), max_tweet_len).long()
    for row, tweet in enumerate(flat_tweets):
        all_tweet[row, : len(tweet)] = torch.LongTensor(tweet)

    return torch.LongTensor(user_idx_list), all_tweet, tweets_num_list, labels


if __name__ == '__main__':
    # Smoke test: building the 'train' split requires the dataset files under
    # ../../dataset/cmu and a CUDA device (the graph and features are moved
    # to GPU in __init__).
    dataset = LocationTweetDataset(root_path='../../dataset', dataset='cmu', suffix='train')
    print(dataset.graph)
    # Iterate one epoch with the custom collate function to verify batching.
    dataloader = DataLoader(dataset, batch_size=10, shuffle=True, collate_fn=tweets_collate_fn)
    for i in dataloader:
        print(i)
