import time
import pandas as pd
import pickle
import numpy as np
import logging
from torch_geometric.data import Data
import torch
from torch_geometric.utils import to_torch_coo_tensor, to_dense_adj
from torch_sparse import coalesce
import networkx as nx


class Data1:
    """Array-backed container for diffusion events with mini-batch iteration.

    Extracts the event columns of a preprocessed DataFrame into numpy arrays
    and serves them in fixed-size batches via `loader`.
    """

    def __init__(self, data, is_split=False):
        # Raw per-event arrays pulled out of the DataFrame once, up front.
        self.srcs = data['src'].values
        self.dsts = data['dst'].values
        self.times = data['abs_time'].values
        self.trans_cascades = data['cas'].values
        self.pub_times = data['pub_time'].values
        self.labels = data['label'].values
        self.length = len(self.srcs)
        self.is_split = is_split
        if is_split:
            # Train/val/test type marker is only present on split data.
            self.types = data['type'].values

    def loader(self, batch):
        """Yield ((src, dst, cas, abs_time, pub_time[, type]), labels) batches."""
        for start in range(0, self.length, batch):
            end = min(start + batch, self.length)
            window = slice(start, end)
            feats = [self.srcs[window], self.dsts[window], self.trans_cascades[window],
                     self.times[window], self.pub_times[window]]
            if self.is_split:
                feats.append(self.types[window])
            yield tuple(feats), self.labels[window]


# Attach the popularity label to a cascade's events observed before the observation time.
def get_label(x: pd.DataFrame, observe_time, label):
    """Trim one cascade to its observation window and write its label.

    Parameters
    ----------
    x : DataFrame of a single cascade's events, sorted by 'time', with
        columns 'time', 'cas' and 'label' (label pre-filled with -1).
    observe_time : end of the observation window (same unit as x['time']).
    label : dict mapping cascade id -> total retweet count by predict time.

    Returns
    -------
    A one-element list holding the trimmed DataFrame, or [] when the cascade
    is unlabeled or has fewer than 10 events inside the observation window.
    """
    # Number of events strictly before observe_time (ties insert on the left).
    idx = np.searchsorted(x['time'], observe_time, side='left')
    casid = x['cas'].values[0]
    if casid in label and idx >= 10:
        # Keep at most the first 100 events; -1 converts the count to an index.
        length = min(idx, 100) - 1
        # Incremental popularity: total retweets by predict time minus those
        # already seen by observe time. Positional .iloc[row, col] write —
        # chained assignment (x['label'].iloc[...] = ...) may silently not
        # modify x under pandas copy-on-write.
        x.iloc[length, x.columns.get_loc('label')] = label[casid] - idx
        return [x.iloc[:length + 1, :]]
    else:
        return []


def data_transformation(dataset, data, time_unit, min_time, param):
    """Normalize times and remap user/cascade ids to one contiguous id space.

    Mutates `data` in place, records node counts and the maximum global time
    in `param`, and pickles the id mappings to data/{dataset}_idmap.pkl.
    Users occupy ids 0..U-1; cascades continue at U.
    """
    if dataset == 'aps':
        # aps publication times are date strings; convert to day offsets.
        delta = pd.to_datetime(data['pub_time']) - pd.to_datetime(min_time)
        data['pub_time'] = delta.apply(lambda d: d.days)
    else:
        data['pub_time'] -= min_time
    data['abs_time'] = (data['pub_time'] + data['time']) / time_unit
    data['pub_time'] /= time_unit
    data['time'] /= time_unit
    data.sort_values(by=['abs_time', 'id'], inplace=True, ignore_index=True)

    # Users get ids 0..U-1.
    users = list(set(data['src']) | set(data['dst']))
    user2id = {user: uid for uid, user in enumerate(users)}
    id2user = {uid: user for uid, user in enumerate(users)}
    # Cascade ids start right after the largest user id.
    offset = len(users)
    cases = list(set(data['cas']))
    cas2id = {cas: cid + offset for cid, cas in enumerate(cases)}
    id2cas = {cid + offset: cas for cid, cas in enumerate(cases)}

    # All keys are present by construction, so .map is equivalent to apply.
    data['src'] = data['src'].map(user2id)
    data['dst'] = data['dst'].map(user2id)
    data['cas'] = data['cas'].map(cas2id)

    param['node_num'] = {'user': max(data['src'].max(), data['dst'].max()) + 1,
                         'cas': data['cas'].max() + 1}
    param['max_global_time'] = data['abs_time'].max()
    mappings = {'user2id': user2id, 'id2user': id2user, 'cas2id': cas2id, 'id2cas': id2cas}
    with open(f'data/{dataset}_idmap.pkl', 'wb') as fh:
        pickle.dump(mappings, fh)


def get_split_data(dataset, observe_time, predict_time, time_unit, all_data, min_time, metadata, log, param):
    """Label, filter, split and remap the diffusion data.

    Trims every cascade to its observation window (via `get_label`), writes
    the incremental-popularity label on the last observed event, assigns each
    labeled cascade to train/val/test, remaps ids with `data_transformation`,
    and persists the split csv, the split-index pickle and the label pickle.

    Parameters
    ----------
    dataset : str, dataset name used for split rules and output file names.
    observe_time, predict_time : window lengths expressed in `time_unit`s.
    all_data : DataFrame of events (id, src, dst, cas, time, pub_time, label).
    metadata : DataFrame of cascade metadata with 'casid' and 'pub_time'.
    param : dict populated with node counts / time bounds (side effect).

    Returns the filtered, remapped event DataFrame.
    """

    def data_split(legal_cascades, train_portion=0.7, val_portion=0.15):
        """
        set cas type, 1 for train cas, 2 for val cas, 3 for test cas , and 0 for other cas that will be dropped
        """
        m_metadata = metadata[metadata['casid'].isin(set(legal_cascades))]
        all_idx, type_map = {}, {}
        if dataset == 'twitter':
            dt = pd.to_datetime(m_metadata['pub_time'], unit='s', utc=True).dt.tz_convert('Asia/Shanghai')
            # Keep cascades published in March or on/before April 10 (Shanghai time).
            idx = dt.apply(lambda x: not (x.month == 4 and x.day > 10)).values
        elif dataset == 'weibo':
            dt = pd.to_datetime(m_metadata['pub_time'], unit='s', utc=True).dt.tz_convert('Asia/Shanghai')
            # Keep cascades published between 08:00 and 18:00 local time.
            idx = dt.apply(lambda x: 18 > x.hour >= 8).values
        elif dataset == 'aps':
            idx = pd.to_datetime(m_metadata['pub_time']).apply(lambda x: x.year <= 1997).values
        else:
            idx = np.array([True] * len(m_metadata))

        cas = m_metadata[idx]['casid'].values
        rng = np.random.default_rng(0)
        rng.shuffle(cas)  # fixed seed keeps the split reproducible
        train_pos, val_pos = int(train_portion * len(cas)), int((train_portion + val_portion) * len(cas))

        train_cas, val_cas, test_cas = np.split(cas, [train_pos, val_pos])
        all_idx['train'] = train_cas
        type_map.update(dict(zip(train_cas, [1] * len(train_cas))))
        all_idx['val'] = val_cas
        type_map.update(dict(zip(val_cas, [2] * len(val_cas))))
        all_idx['test'] = test_cas
        type_map.update(dict(zip(test_cas, [3] * len(test_cas))))
        # Every cascade not selected above gets type 0 and will be dropped.
        reset_cas = set(metadata['casid']) - set(train_cas) - set(val_cas) - set(test_cas)
        type_map.update(dict(zip(list(reset_cas), [0] * len(reset_cas))))
        return all_idx, type_map

    # Per-cascade count of events before the prediction horizon = final popularity.
    all_label = all_data[all_data['time'] < predict_time * time_unit].groupby(by='cas', as_index=False)['id'].count()
    all_label = dict(zip(all_label['cas'], all_label['id']))
    m_data = []
    for cas, df in all_data.groupby(by='cas'):
        # Keep only the observed part of each cascade, labeled by get_label.
        m_data.extend(get_label(df, observe_time * time_unit, all_label))
    all_data = pd.concat(m_data, axis=0)
    # Split using only cascades that actually received a label.
    all_idx, type_map = data_split(all_data[all_data['label'] != -1]['cas'].values)
    all_data['type'] = all_data['cas'].apply(lambda x: type_map[x])
    all_data = all_data[all_data['type'] != 0]
    # all_idx is used by baselines to select cascade ids, so it is NOT remapped.
    data_transformation(dataset, all_data, time_unit, min_time, param)
    all_data.to_csv(f'data/{dataset}_split1.csv', index=False)
    with open(f'data/{dataset}_idx1.pkl', 'wb') as f:
        pickle.dump(all_idx, f)
    # Build the {cas: label} mapping from the rows that carry a label.
    valid_label_data = all_data[all_data["label"] != -1][["cas", "label"]]
    all_label = dict(zip(valid_label_data["cas"], valid_label_data["label"]))

    # BUG FIX: the label pickle path was hard-coded to "twitter"; use the
    # dataset name so it matches make_label's output location.
    with open(f"../data/{dataset}_label.pkl", "wb") as f:
        pickle.dump(all_label, f)

    log.info(
        f"Total Trans num is {len(all_data)}, Train cas num is {len(all_idx['train'])}, "
        f"Val cas num is {len(all_idx['val'])}, Test cas num is {len(all_idx['test'])}")
    return all_data


def get_data(dataset, observe_time, predict_time, train_time, val_time, test_time, time_unit,
             log: logging.Logger, param):
    """Load raw events and metadata, then build the split dataset.

    data stores all diffusion behaviors, in the form of (id,src,dst,cas,time).
    The `id` refers to the id of the interaction;
    `src`,`dst`,`cas`,`time` means that user `dst` forwards the message `cas`
    from `src` after `time` time has elapsed since the publication of cascade
    `cas`. metadata stores the metadata of cascades, including the publication
    time, publication user, etc.
    """
    start = time.time()
    data: pd.DataFrame = pd.read_csv(f'data/{dataset}.csv')
    metadata = pd.read_csv(f'data/{dataset}_metadata.csv')
    min_time = min(metadata['pub_time'])
    # Attach each cascade's metadata (rows merged where cas == casid).
    data = pd.merge(data, metadata, left_on='cas', right_on='casid')
    data = data[['id', 'src', 'dst', 'cas', 'time', 'pub_time']]
    param['max_time'] = {'user': 1, 'cas': param['observe_time']}
    data['label'] = -1  # -1 marks "no label assigned yet"
    data.sort_values(by='id', inplace=True, ignore_index=True)
    log.info(
        f"Min time is {min_time}, Train time is {train_time}, Val time is {val_time}, Test time is {test_time}, Time unit is {time_unit}")
    return_data = get_split_data(dataset, observe_time, predict_time, time_unit, data, min_time,
                                 metadata, log, param)
    log.info(f"Time cost for loading data is {time.time() - start}s")
    return return_data


#  The incoming x has already had its labels assigned once (by get_label).
def get_label_ratio(x: pd.DataFrame, observe_time, slot):
    """Write slot-boundary labels into `x` in place and pickle the result.

    The observation window is divided into `slot` equal sub-windows; the last
    event falling inside each sub-window gets the cascade's final label (the
    maximum label already present for that cascade).

    Parameters
    ----------
    x : DataFrame with 'cas', 'time' and 'label' columns, already labeled
        once by get_label; rows of one cascade are assumed contiguous.
    observe_time : length of the observation window (same unit as x['time']).
    slot : number of equal sub-windows.
    """
    a = time.time()
    ratio = 1 / slot
    # Final popularity per cascade = the maximum label the cascade carries.
    final_label = x.groupby("cas")["label"].max().to_dict()

    # BUG FIX: the boundaries were hard-coded to exactly 4 sub-windows even
    # though `slot` is a parameter; generate one boundary per sub-window.
    boundaries = [observe_time * k * ratio for k in range(1, slot + 1)]
    label_col = x.columns.get_loc("label")
    times = x["time"].values
    cascades = x["cas"].values
    for i in range(len(x) - 1):
        t, t_next = times[i], times[i + 1]
        # Row i is the last event of a sub-window when a boundary b satisfies
        # t <= b < t_next (matches the original (<=, >) pair of comparisons).
        if any(t <= b < t_next for b in boundaries):
            # Positional write: chained assignment (x["label"].iloc[i] = ...)
            # may silently not modify x under pandas copy-on-write.
            x.iloc[i, label_col] = final_label[cascades[i]]
    b = time.time()
    print("get_label_ratio time: " + str(b - a))

    # Persist the multi-slot training data.
    with open('../data/twitter_train_graph_ob_time_all_slots.pkl', 'wb') as f:
        pickle.dump(x, f)


def trans_data_to_graph(pr, data0, user_max_id=179287):
    """Build a PyG graph from diffusion events with PageRank node features.

    Each interaction row contributes three directed edges: src->dst, src->cas
    and cas->dst; duplicates are removed. Every node id in 0..max_id-1 gets a
    2-dim feature: [type, pagerank], where type is 1 for users (id <=
    user_max_id) and 2 for cascades.

    Parameters
    ----------
    pr : dict mapping node id -> PageRank score.
         NOTE(review): raises KeyError for ids absent from pr — assumes pr
         covers every id in 0..max_id-1; confirm against make_node_pr_values.
    data0 : DataFrame with 'src', 'dst' and 'cas' columns (remapped ids).
    user_max_id : largest user id; generalizes the previously hard-coded
         179287 (default preserves the original behavior).

    Returns a torch_geometric Data(x=(max_id, 2) float features,
    edge_index=(2, E) int64 edges).
    """
    # Collect the three edges per interaction, deduplicated via a set.
    edge_set = set()
    for row in data0.itertuples():
        edge_set.add((row.src, row.dst))
        edge_set.add((row.src, row.cas))
        edge_set.add((row.cas, row.dst))
    src_list, dst_list = [list(side) for side in zip(*edge_set)]

    scale_factor = 1
    # Node features: column 0 is the node type, column 1 the (scaled) PageRank.
    max_id = max(max(src_list), max(dst_list)) + 1
    node_types = [1 if node <= user_max_id else 2 for node in range(max_id)]
    node_feats = [pr[node] * scale_factor for node in range(max_id)]

    # Stack as rows then transpose -> one (type, pagerank) pair per node.
    x = torch.tensor([node_types, node_feats], dtype=torch.float32).t()
    edge_index = torch.tensor([src_list, dst_list], dtype=torch.int64)

    return Data(x=x, edge_index=edge_index)


def make_node_pr_values(dataset):
    """Compute PageRank scores over the training-split diffusion graph.

    Loads the split csv, keeps only training cascades (type == 1), builds a
    directed graph where every interaction adds the edges src->dst, cas->dst
    and src->cas, and returns nx.pagerank's {node id: score} dict.
    """
    with open(f"../data/{dataset}_split1.csv", "rb") as f:
        data = pd.read_csv(f)
    train_data = data[data["type"] == 1]

    nodes, edges = set(), set()
    for row in train_data.itertuples():
        nodes.update((row.src, row.dst, row.cas))
        edges.update(((row.src, row.dst), (row.cas, row.dst), (row.src, row.cas)))

    graph = nx.DiGraph()
    graph.add_nodes_from(nodes)
    graph.add_edges_from(edges)

    return nx.pagerank(graph, alpha=0.85)


def make_label(dataset):
    """Extract {cascade id: label} from the split csv and pickle it.

    Reads ../data/{dataset}_split1.csv, keeps rows whose label is not the
    -1 sentinel, and dumps the mapping to ../data/{dataset}_label.pkl.
    """
    with open(f"../data/{dataset}_split1.csv", "rb") as f:
        frame = pd.read_csv(f)

    labeled = frame[frame["label"] != -1][["cas", "label"]]
    # dict(zip(...)) replaces the manual itertuples loop, which also shadowed
    # the builtin `dict`.
    labels = dict(zip(labeled["cas"], labeled["label"]))
    with open(f"../data/{dataset}_label.pkl", "wb") as f:
        pickle.dump(labels, f)
    print(labeled)

# if __name__ == '__main__':
#     # make_label("twitter")
#     with open("../data/twitter_label.pkl", "rb") as f:
#         label = pickle.load(f)
#     print()