import scipy.sparse.linalg as spalg
import torch
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
import LPSI
from graph_model import *
from propagation_model import *
import timing
import lzyutil as util
from config import args, devs

# SEED = args.random_seed
# random.seed(SEED)
# np.random.seed(SEED)
# torch.manual_seed(SEED)
# torch.cuda.manual_seed(SEED)
# torch.backends.cudnn.deterministic = True

# Method identifiers used as keys when recording per-method metrics.
LPSI_METHOD = 'lpsi'
RSG_METHOD = 'rsg'
# Metric identifiers: F1 score, ROC-AUC, and average error distance.
F1_METRIC = 'f1'
AUC_METRIC = 'auc'
AED_METRIC = 'aed'

if __name__ == '__main__':
    # Entry point: train the ReverseSequenceGenerator (RSG) source-detection
    # model on simulated propagation data and spot-check it against the LPSI
    # baseline (AUC / F1) on each batch.
    print(args)

    # --- data preparation -------------------------------------------------
    dataset = args.dataset

    graph = GraphDataset(dataset, use_cache=args.use_cache,
                         overwrite=args.update_cache)
    print("graph size :", graph.get_size())
    numnodes = graph.get_size()
    lb, ub = args.lb, args.ub

    data_group_size = args.data_group_size
    seq_len_ub = args.seq_len

    num_src = args.num_src
    # Build the propagation dataset for the configured diffusion model.
    if args.generate_model == "SI":
        label_dataset = SIDataSet(graph,
                                  batch_size=data_group_size,
                                  seq_len_ub=seq_len_ub,
                                  iter_num=args.generate_step,
                                  src_num=num_src,
                                  infect_rate=args.infect_rate,
                                  lb=lb, ub=ub,
                                  update_cache=args.update_cache)
    elif args.generate_model == "SIR":
        label_dataset = SIRDataSet(graph,
                                   batch_size=data_group_size,
                                   seq_len_ub=seq_len_ub,
                                   iter_num=args.generate_step,
                                   src_num=num_src,
                                   infect_rate=args.infect_rate,
                                   recover_rate=args.recover_rate,
                                   lb=lb, ub=ub,
                                   update_cache=args.update_cache)
    elif args.generate_model == "IC":
        label_dataset = ICDataSet(graph,
                                  batch_size=data_group_size,
                                  seq_len_ub=seq_len_ub,
                                  iter_num=args.generate_step,
                                  src_num=num_src,
                                  infect_rate=args.infect_rate,
                                  lb=lb, ub=ub,
                                  update_cache=args.update_cache)
    else:
        # Fail fast: previously label_dataset stayed None and the script
        # crashed later with an opaque AttributeError.
        raise ValueError(
            f"unknown generate_model: {args.generate_model!r} "
            "(expected 'SI', 'SIR' or 'IC')")

    print("load dataset done", label_dataset)

    lpsi_cache_id = f'lpsi_{label_dataset.cache_path_prefix}'
    batch_size = args.batch_size
    dataloader = DataLoader(dataset=label_dataset,
                            batch_size=batch_size,
                            shuffle=True)

    # --- model setup ------------------------------------------------------
    cm = ClusteringMachine(graph, args.cluster_number)

    rsg_model = ReverseSequenceGenerator(
        cm, devs.device,
        cluster_train_batch=1,
        dropout=args.dropout
    )

    # Xavier-uniform init for every weight matrix; 1-D params (biases) are
    # left at their default initialization.
    for p in rsg_model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)

    optimizer_list = rsg_model.generate_optimizer()

    rsg_model.train()

    lpsi_best_f1, lpsi_best_auc = .0, .0
    rsg_best_f1, rsg_best_auc = .0, .0

    # BCEWithLogitsLoss is stateless; construct once instead of per batch.
    bce_loss = nn.BCEWithLogitsLoss()

    # --- training loop ----------------------------------------------------
    train_epoch = args.epochs
    for epoch in tqdm(list(range(train_epoch)), desc='train process'):

        for v in dataloader:
            # BUGFIX: the module is imported as `util` (see
            # `import lzyutil as util`); the bare name `lzyutil` was never
            # bound and raised NameError here and in the criterion calls.
            metrics = util.CriterionMetrics()
            y_tensor = v['y'].to(device=devs.device, dtype=torch.float32)
            src_list = v['gt'].to(devs.device)
            positive_lpsi_data = v['lpsi_positive'].to(
                device=devs.device, dtype=torch.float32)
            negative_lpsi_data = v['lpsi_negative'].to(
                device=devs.device, dtype=torch.float32)

            assert y_tensor.shape == positive_lpsi_data.shape
            assert y_tensor.shape == negative_lpsi_data.shape

            # Stack the three signals as a trailing channel dim, reverse
            # along dim 0, then swap dims 0 and 1.
            # NOTE(review): assumes dim 0 of `y` is the time axis and dim 1
            # is the batch axis — confirm against the dataset layout.
            inputs = torch.transpose(
                torch.flip(
                    torch.stack([y_tensor, positive_lpsi_data,
                                negative_lpsi_data], dim=-1),
                    dims=[0]),
                0, 1
            ).contiguous().to(devs.device)

            node_feat = label_dataset.get_meta(v['id'])

            # One optimization step; returns per-cluster predictions and
            # the loss components for logging.
            all_result, loss_list, sg_loss_list, l2_loss_list = rsg_model.process(
                inputs=inputs,
                node_feat=node_feat,
                loss_func=bce_loss,
                optimizer_list=optimizer_list,
                src_list=src_list,
                train=True
            )
            print(sum(loss_list) / len(loss_list), sum(sg_loss_list) / len(sg_loss_list),
                  sum(l2_loss_list) / len(l2_loss_list))

            # One-hot ground-truth source vector per sample (CPU tensor).
            ground_truth = torch.zeros(
                (batch_size, numnodes), dtype=torch.float32)
            graph.cur_ts = graph.max_ts

            # Only the first sample of the batch is scored — a cheap
            # per-step spot check rather than a full-batch evaluation.
            for i in [0]:
                ground_truth[i, v['gt'][i]] = 1
                lpsi_pred = positive_lpsi_data[i][0]

                # NOTE(review): lpsi_pred lives on devs.device while
                # ground_truth is on CPU — verify util.criterion handles
                # mixed devices.
                lpsi_auc, lpsi_f1 = util.criterion(
                    lpsi_pred, ground_truth[i])

                metrics.add(AUC_METRIC, LPSI_METHOD, lpsi_auc)
                metrics.add(F1_METRIC, LPSI_METHOD, lpsi_f1)

                rsg_pred = all_result[i].detach().to(devs.backup_device)
                rsg_auc, rsg_f1 = util.criterion(
                    rsg_pred, ground_truth[i]
                )

                metrics.add(AUC_METRIC, RSG_METHOD, rsg_auc)
                metrics.add(F1_METRIC, RSG_METHOD, rsg_f1)

            print(metrics.conclude())

    # Column names for result reporting (erd/AED currently not computed).
    rst_name_list = [
        "lpsi_f1", "gen_f1",
        "lpsi_auc", "gen_auc",
        "lpsi_erd", "gen_erd"
    ]