import pickle
from typing import Dict

import math

import numpy as np
import pandas as pd
import torch.autograd
from torch.nn.modules.loss import _Loss
from torch_geometric.nn import GCNConv
from tqdm import tqdm

from model.DynamicGCN1_edit1 import DynamicGraphConvModel
from utils.data_processing import *
from torch.nn import functional as F

from utils.my_utils import load_model, save_model, Metric, EarlyStopMonitor, pcc, male, mape, msle
from torch_geometric.typing import torch_scatter


class GCNNet(torch.nn.Module):
    """Stack of GCN layers with ReLU, a final GCN layer, and L2 output normalization.

    Parameters
    ----------
    input_dim : int
        Feature dimension of the input node features.
    hidden_dim : int
        Feature dimension of every hidden/output layer.
    num_layers : int
        Number of ReLU-activated GCN layers before the final layer.
    """

    def __init__(self, input_dim, hidden_dim, num_layers):
        super(GCNNet, self).__init__()
        # BUGFIX: only the first layer maps input_dim -> hidden_dim; the
        # original built every layer as GCNConv(input_dim, hidden_dim), which
        # breaks for num_layers > 1 whenever input_dim != hidden_dim (layer 2
        # would receive a hidden_dim-sized input against input_dim weights).
        self.gcn_layers = torch.nn.ModuleList(
            [GCNConv(input_dim if i == 0 else hidden_dim, hidden_dim)
             for i in range(num_layers)])
        self.final_layer = GCNConv(hidden_dim, hidden_dim)

    def forward(self, x, edge_index):
        """Return L2-normalized node embeddings of shape (num_nodes, hidden_dim)."""
        for gcn_layer in self.gcn_layers:
            x = F.relu(gcn_layer(x, edge_index))
        x = self.final_layer(x, edge_index)
        x = F.normalize(x, p=2, dim=-1)  # L2 normalization
        return x


# Returns the t=0 graph (x of shape (n, 3), edge_index of shape (e, 2)) and the later interaction stream
def load_graph0_and_change_data(dataset, type):
    """Load the initial (t=0) snapshot graph and the stream of later interactions.

    Reads ``data/<dataset>_pr_values.pkl`` (precomputed per-node values) and
    ``data/<dataset>_split1.csv``, splits rows into the t=0 snapshot and the
    remaining interactions (sorted by ``abs_time``), and optionally filters
    both by split code.

    Parameters
    ----------
    dataset : str
        Dataset name used to locate the data files.
    type : str
        Split selector: "train" (code 1), "val" (2) or "test" (3). Any other
        value leaves the data unfiltered, matching the original behavior.
        (The name shadows the builtin ``type`` but is kept because callers
        pass it as a keyword argument.)

    Returns
    -------
    tuple
        ``(graph_t0, change_data)`` where ``graph_t0`` is the graph built by
        ``trans_data_to_graph`` from the time==0 rows and ``change_data`` is
        a DataFrame of the remaining rows sorted by ``abs_time``.
    """
    with open("data/" + dataset + "_pr_values.pkl", "rb") as f:
        pr = pickle.load(f)

    # pandas accepts a binary file handle here.
    with open("data/" + dataset + "_split1.csv", "rb") as f:
        data = pd.read_csv(f)

    data_t0 = data[data["time"] == 0]
    change_data = data[data["time"] != 0].sort_values(by="abs_time")

    # Split name -> integer code stored in the "type" column.
    split_code = {"train": 1, "val": 2, "test": 3}.get(type)
    if split_code is not None:
        data_t0 = data_t0[data_t0["type"] == split_code]
        change_data = change_data[change_data["type"] == split_code]

    # Build the initial graph from the t=0 rows.
    graph_t0 = trans_data_to_graph(pr, data_t0)
    return graph_t0, change_data


def move_to_device(device, *args):
    """Move every argument to *device*, converting non-tensors first.

    Parameters
    ----------
    device : torch.device
        Target device.
    *args
        Tensors are moved with ``.to``; anything else (lists, numpy arrays,
        scalars) is wrapped with ``torch.tensor`` on the target device.

    Returns
    -------
    list
        Device-resident tensors, in the same order as *args*.
    """
    results = []
    for arg in args:
        # BUGFIX: isinstance instead of `type(arg) is torch.Tensor` so tensor
        # subclasses (e.g. torch.nn.Parameter) are moved rather than being
        # re-wrapped by torch.tensor (which copies and detaches them).
        if isinstance(arg, torch.Tensor):
            results.append(arg.to(device=device))
        else:
            results.append(torch.tensor(arg, device=device))
    return results


def select_label(labels, types):
    """Build boolean masks selecting labelled samples (label != -1) per split.

    Split membership is encoded in `types`: 1 = train, 2 = val, 3 = test.
    Returns a dict with keys 'train', 'val', 'test' mapping to boolean masks.
    """
    labelled = labels != -1
    return {
        split_name: labelled & (types == split_code)
        for split_name, split_code in (("train", 1), ("val", 2), ("test", 3))
    }


def eval_model(model: DynamicGraphConvModel, dataset, type, pr, device: torch.device, param: Dict,
               metric: Metric, loss_criteria: _Loss, move_final: bool = False) -> Dict:
    """Evaluate `model` on one split of `dataset`, streaming batches in time order.

    The post-t0 interactions are consumed batch by batch; each batch's edges are
    merged into a cumulative adjacency, the model predicts on the batch, and loss
    plus metrics are accumulated only for samples whose label is set (!= -1) and
    whose split code matches `type`.

    Parameters:
        model: model to evaluate; its graph state is re-initialized from the t=0 graph.
        dataset: dataset name, forwarded to load_graph0_and_change_data.
        type: split name, e.g. "val" or "test" (shadows the builtin `type`).
        pr: precomputed per-node values consumed by trans_data_to_graph.
        device: device tensors are moved to.
        param: hyper-parameters; only param['bs'] (batch size) is read here.
        metric: metric accumulator; reset via fresh() before the loop.
        loss_criteria: loss used for reporting only (no backward pass runs here).
        move_final: forwarded to Metric.calculate_metric.

    Returns:
        Dict mapping the split name to its computed metrics.
    """
    eval_graph_t0, eval_change_data = load_graph0_and_change_data(dataset, type)
    # NOTE(review): `eval` shadows the builtin; kept unchanged (doc-only edit).
    eval = Data1(eval_change_data, is_split=True)
    eval_graph_t0.x, eval_graph_t0.edge_index = move_to_device(device, eval_graph_t0.x, eval_graph_t0.edge_index)
    h_adj = eval_graph_t0.edge_index  # cumulative edge index, grown each batch
    model.init_graph(eval_graph_t0)
    model.eval()
    metric.fresh()
    epoch_metric = {}
    loss = {f'{type}': []}
    with torch.no_grad():
        for x, label in tqdm(eval.loader(param['bs']), total=math.ceil(eval.length / param['bs']), desc='eval_or_test'):
            src, dst, trans_cas, trans_time, pub_time, types = x
            index_dict = select_label(label, types)
            trans_time, pub_time, label = move_to_device(device, trans_time, pub_time, label)
            # Turn src/dst/trans_cas into this step's delta graph; together with the
            # previous graph it is fed to the model to refresh node embeddings.
            # Assemble src/dst/cas into a DataFrame first.
            data = pd.DataFrame({"src": src.tolist(), "dst": dst.tolist(), "cas": trans_cas.tolist()})
            delta_graph = trans_data_to_graph(pr, data)
            delta_graph.x, delta_graph.edge_index = move_to_device(device, delta_graph.x, delta_graph.edge_index)
            delta_adj = delta_graph.edge_index
            # Union of all edges seen so far with this batch's new edges.
            now_adj = torch.unique(torch.cat((h_adj, delta_adj), dim=1), dim=1)
            h_adj = now_adj
            target_idx = index_dict[type]
            pred = model.forward(delta_adj, now_adj, trans_cas[target_idx])
            for dtype in [type]:
                idx = index_dict[dtype]
                if sum(idx) > 0:
                    m_target = trans_cas[idx]
                    m_label = label[idx]
                    m_label[m_label < 1] = 1  # clamp so log2 stays non-negative
                    m_label = torch.log2(m_label)
                    m_pred = pred[m_target]
                    loss[dtype].append(loss_criteria(m_pred, m_label).item())
                    metric.update(target=m_target, pred=m_pred.cpu().numpy(), label=m_label.cpu().numpy(), dtype=dtype)
            # model.update_graph(delta_graph.x)
        for dtype in [f'{type}']:
            epoch_metric[dtype] = metric.calculate_metric(dtype, move_history=True, move_final=move_final,
                                                          loss=np.mean(loss[dtype]))
        return epoch_metric


def train_model(num: int, dataset, model: DynamicGraphConvModel, logger: logging.Logger,
                early_stopper: EarlyStopMonitor,
                device: torch.device, param: Dict, metric: Metric, result: Dict):
    """Train `model` on the "train" split of `dataset`, validating each epoch.

    Each epoch streams the post-t0 interactions batch by batch, growing a
    cumulative adjacency. Two updates happen per batch: (1) an update on the
    cascades that reached observation time (label != -1), and (2) an auxiliary
    update of every cascade in the batch against its final label from
    ``<dataset>_label.pkl``. Validation MSLE drives early stopping; the final
    test metrics are averaged into `result` across param['run'] runs.

    Parameters:
        num: run index, used when saving/loading model checkpoints.
        dataset: dataset name used to locate the data files.
        model: model to train (moved to `device` here).
        logger: logger for progress messages.
        early_stopper: monitor that stops training on stagnating val MSLE.
        device: device for model and tensors.
        param: hyper-parameters — reads 'lr', 'epoch', 'bs', 'model_path', 'run'.
        metric: metric accumulator shared with eval_model.
        result: dict accumulating averaged test metrics across runs (mutated in place).
    """
    graph_t0, change_data = load_graph0_and_change_data(dataset, type="train")
    train = Data1(change_data, is_split=True)
    graph_t0.x, graph_t0.edge_index = move_to_device(device, graph_t0.x, graph_t0.edge_index)
    h_adj = graph_t0.edge_index  # cumulative edge index, grown each batch
    with open("data/" + dataset + "_pr_values.pkl", "rb") as f:
        pr = pickle.load(f)
    model = model.to(device)
    logger.info('Start training citation')
    optimizer = torch.optim.Adam(model.parameters(), lr=param['lr'])
    loss_criterion = torch.nn.MSELoss()
    # Final cascade sizes, keyed by cascade id — used for the auxiliary loss.
    with open("data/" + dataset + "_label.pkl", "rb") as f:
        label_all = pickle.load(f)

    for epoch in range(param['epoch']):
        model.init_graph(graph_t0)
        model.train()
        logger.info(f'Epoch {epoch}:')
        epoch_start = time.time()
        train_loss = []

        for x, label in tqdm(train.loader(param['bs']), total=math.ceil(train.length / param['bs']),  # ceil
                             desc='training'):
            src, dst, trans_cas, trans_time, pub_time, types = x
            idx_dict = select_label(label, types)
            trans_time, pub_time, label = move_to_device(device, trans_time, pub_time, label)
            # Turn src/dst/trans_cas into this step's delta graph; together with the
            # previous graph it is fed to the model to refresh node embeddings.
            data = pd.DataFrame({"src": src.tolist(), "dst": dst.tolist(), "cas": trans_cas.tolist()})
            delta_graph = trans_data_to_graph(pr, data)

            delta_graph.x, delta_graph.edge_index = move_to_device(device, delta_graph.x, delta_graph.edge_index)
            delta_adj = delta_graph.edge_index
            # Union of all edges seen so far with this batch's new edges.
            now_adj = torch.unique(torch.cat((h_adj, delta_adj), dim=1), dim=1)
            h_adj = now_adj
            # target_idx selects cascades that reached observation time.
            target_idx = idx_dict['train']

            # Main update: only cascades that reached observation time contribute
            # to the reported train_loss.
            if sum(target_idx) > 0:
                pred = model.forward(delta_adj, now_adj, trans_cas[target_idx])
                target, target_label, target_time = trans_cas[target_idx], label[target_idx], trans_time[target_idx]
                target_label[target_label < 1] = 1  # clamp so log2 stays non-negative
                target_label = torch.log2(target_label)
                target_pred = pred[target]
                optimizer.zero_grad()
                loss = loss_criterion(target_pred, target_label)
                loss.backward()
                optimizer.step()
                train_loss.append(loss.item())

            # Auxiliary update: every cascade in the batch against its final label.
            cas_idx = np.unique(trans_cas)
            pred = model.forward(delta_adj, now_adj, cas_idx)
            label_idx = [label_all[i] for i in cas_idx]
            label_idx = torch.tensor(label_idx, device=device)
            label_idx[label_idx < 1] = 1
            label_idx = torch.log2(label_idx)
            loss = loss_criterion(pred[cas_idx], label_idx)
            # BUGFIX: clear gradients before this second backward/step; the
            # original omitted zero_grad() here, so stale gradients from the
            # preceding update leaked into this auxiliary step.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        epoch_end = time.time()

        epoch_metric = eval_model(model, dataset, type="val", pr=pr, device=device, param=param, metric=metric,
                                  loss_criteria=loss_criterion, move_final=False)
        logger.info(
            f"Epoch{epoch}: time_cost:{epoch_end - epoch_start} train_loss:{np.mean(train_loss)}")

        for dtype in ['val']:
            metric.info(dtype)
        if early_stopper.early_stop_check(epoch_metric['val']['msle']):
            break
    # NOTE(review): this message is logged even when the loop ends by epoch
    # budget rather than early stopping; kept for log-format compatibility.
    logger.info('No improvement over {} epochs, stop training'.format(early_stopper.max_round))
    logger.info(f'Loading the best model at epoch {early_stopper.best_epoch}')
    load_model(model, param['model_path'], num)
    logger.info(f'Loaded the best model at epoch {early_stopper.best_epoch} for inference')
    final_metric = eval_model(model, dataset, type="test", pr=pr, device=device, param=param, metric=metric,
                              loss_criteria=loss_criterion, move_final=False)
    logger.info(f'Runs:{num}\n {metric.history}')
    metric.save()
    save_model(model, param['model_path'], num)

    # Average the test metrics of this run into the cross-run accumulator.
    result['msle'] = np.round(result['msle'] + final_metric['test']['msle'] / param['run'], 4)
    result['mape'] = np.round(result['mape'] + final_metric['test']['mape'] / param['run'], 4)
    result['male'] = np.round(result['male'] + final_metric['test']['male'] / param['run'], 4)
    result['pcc'] = np.round(result['pcc'] + final_metric['test']['pcc'] / param['run'], 4)
