import os
import pickle
from datetime import timedelta
from os import path as osp

import networkx as nx
import numpy as np
import pandas as pd
import torch
from node2vec import Node2Vec
from torch.utils.data import TensorDataset
from tqdm.auto import tqdm


class Node2VecDataset(TensorDataset):
    """Cascade dataset of (summed node2vec embedding, cascade size) pairs.

    Each cascade CSV under ``<root_dir>/<dataset_name>_cascades`` is turned
    into a directed repost graph observed for ``observation`` hours after the
    first post.  Every graph is embedded with node2vec and its node vectors
    are summed into one fixed-size feature vector; the label is the total
    cascade length.  Both the graphs and the embedding tensors are cached on
    disk under ``save_dir`` and reloaded on later runs unless a rebuild is
    forced or a cache file is missing.
    """

    # Default fraction of cascade files to sample per dataset name.
    # NOTE(review): class-level mutable dict — override via the
    # ``sample_rates`` constructor argument rather than mutating in place.
    sample_rates = {
        'topic': 0.5,
        'repost': 0.5,
        'twitter': 0.5,
    }

    def __init__(self, root_dir='F:\\Python-projects\\DatasetAnalysis\\data\\dataset',
                 dataset_name='topic', observation=2, save_dir='data', sample_rates=None,
                 follower_hop=0,
                 virtual_node=None,
                 reload_graphs=False, reload_tensors=False, save=True,
                 **kwargs):
        """Build the dataset, loading or rebuilding its on-disk caches.

        Args:
            root_dir: dataset root holding ``<name>_cascades`` and ``global``.
            dataset_name: key into ``sample_rates`` (e.g. ``'topic'``).
            observation: observation window length in hours.
            save_dir: directory for the ``.graph`` / ``.emb`` cache files.
            sample_rates: optional per-dataset sampling-rate override.
            follower_hop: follower-graph hops to merge into each cascade
                graph (0 disables follower sampling).
            virtual_node: ``'all'`` or ``'source'`` to connect a virtual hub
                node to all / in-degree-0 nodes, or ``None`` to disable.
            reload_graphs: force re-reading the cascade CSVs.
            reload_tensors: force re-running node2vec.
            save: persist freshly built caches to disk.
            **kwargs: forwarded verbatim to ``node2vec.Node2Vec``.

        Raises:
            ValueError: if ``follower_hop`` is negative or ``virtual_node``
                is not a supported mode.
        """
        self.root_dir = root_dir
        self.save_dir = save_dir
        self.dataset_name = dataset_name
        self.observation = observation
        self.virtual_node = virtual_node

        self.cascade_dir = osp.join(self.root_dir, f'{dataset_name}_cascades')
        self._reload_graphs = reload_graphs
        self._reload_tensors = reload_tensors
        self.save = save
        if sample_rates is not None:
            self.sample_rates = sample_rates

        self.sample_rate = self.sample_rates[self.dataset_name]

        # Cache files are keyed by dataset, window, rate and graph options so
        # different configurations never collide on disk.
        self.name = f'{self.dataset_name}_{self.observation}_{self.sample_rate}'
        # ValueError instead of assert: asserts are stripped under ``-O``.
        if follower_hop < 0:
            raise ValueError('follower_hop must be non-negative')
        self.follower_hop = follower_hop
        if follower_hop:
            self.name += f'_{follower_hop}Hop'
        if virtual_node:
            if virtual_node not in ('all', 'source'):
                raise ValueError("virtual_node must be 'all' or 'source'")
            self.name += f'_v{virtual_node[0]}'

        self.graph_save_path = osp.join(self.save_dir, self.name + '.graph')
        self.tensor_save_path = osp.join(self.save_dir, self.name + '.emb')
        self.graph_list = []
        self.label_list = []
        self.tensor_list = []
        self._follow_graph = None  # lazily built by the follow_graph property
        self.node2vec_kwargs = kwargs
        self.load()
        # Stack the per-cascade (1, dim) numpy embeddings into an (N, dim)
        # float tensor; np.asarray avoids torch's slow list-of-ndarray path.
        embedding_tensor = torch.tensor(np.asarray(self.tensor_list), dtype=torch.float)
        embedding_tensor = torch.squeeze(embedding_tensor, 1)
        # Labels become an (N, 1) float column.
        label_tensor = torch.tensor(self.label_list, dtype=torch.float)
        label_tensor = torch.unsqueeze(label_tensor, 1)
        super(Node2VecDataset, self).__init__(embedding_tensor, label_tensor)

    def load(self):
        """Fill graph/label/tensor lists, using cache files where possible."""
        if self.reload_graphs:
            self.load_graphs()
        else:
            with open(self.graph_save_path, 'rb') as f:
                self.graph_list, self.label_list = pickle.load(f)
        if self.reload_tensors:
            self.load_tensors()
        else:
            with open(self.tensor_save_path, 'rb') as f:
                self.tensor_list = pickle.load(f)

    @property
    def reload_tensors(self):
        """True when the embedding cache must be rebuilt.

        Tensors are rebuilt when the cache file is missing, when a rebuild
        was requested, or whenever the graphs themselves are rebuilt (stale
        embeddings would no longer match the graphs).
        """
        if not osp.exists(self.tensor_save_path):
            return True
        if self.reload_graphs:
            return True
        return self._reload_tensors

    @property
    def follow_graph(self) -> nx.DiGraph:
        """Global follower graph, lazily parsed from the relationships file."""
        if self._follow_graph is None:
            p = osp.join(self.root_dir, 'global', f'{self.dataset_name}_relationships.txt')
            with open(p, 'r', encoding='utf8') as f:
                # One comma-separated edge per line; skip blank lines so a
                # trailing newline cannot produce a malformed 1-item edge.
                edges = [line.strip().split(',') for line in f if line.strip()]
            self._follow_graph = nx.from_edgelist(edges, nx.DiGraph)
        return self._follow_graph

    def add_virtual_node(self, g: nx.DiGraph):
        """Connect a virtual hub node ``'__V'`` to the configured node set.

        With ``virtual_node == 'all'`` every node is linked; with
        ``'source'`` only nodes without incoming edges are.  Edges are added
        in both directions.  Returns ``g`` unchanged when disabled.
        """
        if self.virtual_node is None:
            return g
        if self.virtual_node == 'all':
            nodes_to_connect = list(g.nodes())
        else:
            # 'source': cascade roots, i.e. nodes with in-degree 0.
            nodes_to_connect = [n for n, d in g.in_degree() if d == 0]
        v_node = '__V'
        for n in nodes_to_connect:
            g.add_edge(v_node, n)
            g.add_edge(n, v_node)
        return g

    @property
    def reload_graphs(self):
        """True when the graph cache is missing or a rebuild was requested."""
        if not osp.exists(self.graph_save_path):
            return True
        return self._reload_graphs

    def follower_sample(self, graph: nx.DiGraph):
        """Merge up to ``follower_hop`` hops of the follower graph into ``graph``.

        Starting from the cascade's nodes, follower neighbours are collected
        breadth-first for ``follower_hop`` hops; at most 3000 edges of the
        induced follower subgraph are combined with the repost edges into a
        :class:`nx.MultiDiGraph`, which is returned.
        """
        follow_graph = self.follow_graph  # hoist the lazy property lookup
        node_set = set()
        frontier = set(graph.nodes())
        for _ in range(self.follower_hop):
            node_set.update(frontier)
            next_frontier = set()
            for u in frontier:
                if u not in follow_graph:
                    continue
                for v in follow_graph.neighbors(u):
                    # Only expand nodes not seen in earlier hops.
                    if v not in next_frontier and v not in node_set:
                        next_frontier.add(v)
            frontier = next_frontier
        node_set.update(frontier)
        follow_g = follow_graph.subgraph(node_set)
        mdg = nx.MultiDiGraph()
        # Cap follower edges so huge neighbourhoods do not overwhelm the
        # (much smaller) repost graph.
        mdg.add_edges_from(list(follow_g.edges())[:3000])
        mdg.add_edges_from(graph.edges())
        return mdg

    def load_graphs(self):
        """Read a random sample of cascade CSVs into graphs (and cache them).

        Cascades with fewer than 10 observed nodes are skipped as too small.
        """
        files = [f for f in os.listdir(self.cascade_dir) if f.endswith('csv')]
        np.random.shuffle(files)
        limit = int(len(files) * self.sample_rate)
        for name in tqdm(files[:limit], desc='loading cascades', total=limit):
            graph, label = self.read_csv(osp.join(self.cascade_dir, name))
            if graph.number_of_nodes() < 10:
                continue
            graph = self.add_virtual_node(graph)
            if self.follower_hop:
                graph = self.follower_sample(graph)

            self.graph_list.append(graph)
            self.label_list.append(label)
        # Honour the save flag, consistently with load_tensors (the original
        # unconditionally wrote the graph cache even when save=False).
        if self.save:
            with open(self.graph_save_path, 'wb') as f:
                pickle.dump((self.graph_list, self.label_list), f)

    def load_tensors(self):
        """Embed every graph with node2vec and sum its node vectors.

        Each graph yields one (1, dim) feature: the element-wise sum over
        all node embeddings produced by node2vec's word2vec fit.
        """
        for g in tqdm(self.graph_list, desc='node2vec', total=len(self.graph_list)):
            node2vec = Node2Vec(g, quiet=True, **self.node2vec_kwargs)
            model = node2vec.fit(window=10, min_count=1, batch_words=4, )
            vec = model.wv.vectors
            # Sum over nodes -> one fixed-size vector per cascade graph.
            tensor = np.sum(vec, 0).reshape((1, vec.shape[-1]))
            self.tensor_list.append(tensor)

        if self.save:
            with open(self.tensor_save_path, 'wb') as f:
                pickle.dump(self.tensor_list, f)

    def read_csv(self, path):
        """Read one cascade CSV into an observed repost graph.

        Returns:
            tuple: ``(graph, label)`` where ``label`` is the total cascade
            length and ``graph`` holds only the (uid -> origin_uid) reposts
            within ``observation`` hours of the first row's timestamp.
        """
        df = pd.read_csv(path)
        label = len(df)  # final cascade size, counted before the time cut
        df['created_at'] = pd.to_datetime(df['created_at'])
        # NOTE(review): assumes row 0 is the earliest post — confirm the
        # cascade CSVs are time-ordered.
        start = df.loc[0, 'created_at']
        end = start + timedelta(hours=self.observation)
        # Copy before mutating: in-place fillna on a filtered slice raises
        # SettingWithCopyWarning and is a silent no-op under pandas
        # copy-on-write (pandas >= 2.x), leaving origin_uid NaNs in place.
        ob_df = df[df['created_at'] <= end].copy()
        ob_df['origin_uid'] = ob_df['origin_uid'].fillna(ob_df['uid'])
        g = nx.from_pandas_edgelist(ob_df, 'uid', 'origin_uid', create_using=nx.DiGraph)
        return g, label


if __name__ == '__main__':
    # Smoke test: build every dataset at both observation windows and print
    # the first sample together with its embedding shape.
    rates = {
        'topic': 0.05,
        'repost': 0.05,
        'twitter': 1,
    }
    for name in ['topic', 'repost', 'twitter']:
        for hours in [2, 24]:
            dataset = Node2VecDataset(
                dataset_name=name,
                observation=hours,
                sample_rates=rates,
                save=False,
            )
            print(dataset[0], dataset[0][0].shape)
