import scipy.sparse.linalg as spalg
from tqdm import tqdm

import LPSI
from graph_model import *
from propagation_model import *
from src import timing
import lzyutil as util
from config import args, devs

# SEED = args.random_seed
# random.seed(SEED)
# np.random.seed(SEED)
# torch.manual_seed(SEED)
# torch.cuda.manual_seed(SEED)
# torch.backends.cudnn.deterministic = True


# Load the graph named on the command line and report its node count.
dataset = args.dataset
graph = GraphDataset(dataset)

print("graph size :", graph.get_size())
# Snapshot legality bounds (fraction of infected nodes); upper bound is fixed.
# NOTE(review): args.ub exists? Here ub is hard-coded to 0.7 — confirm intended.
lb, ub = args.lb, 0.7

# Number of propagation sequences per group; cap on snapshots per sequence.
data_group_size = args.data_group_size
seq_len_ub = 30

# Build the labelled propagation dataset for the configured diffusion model.
# The three branches differ only in the epidemic model used to simulate the
# spread (SI / SIR / IC) and in the cache key (`type_str`) used to memoize
# the generated snapshots.
label_dataset = None
if args.generate_model == "SI":
    label_dataset = SIDataSet(graph,
                              batch_size=data_group_size,
                              seq_len_ub=seq_len_ub,
                              iter_num=args.generate_step,
                              src_num=args.num_src,
                              infect_rate=args.infect_rate,
                              lb=lb, ub=ub,
                              type_str=f"{dataset}-{args.num_src}-{args.infect_rate}-{(lb, ub)}-{data_group_size}X{seq_len_ub}-SIlabels",
                              update_cache=args.update_cache)
elif args.generate_model == "SIR":
    label_dataset = SIRDataSet(graph,
                               batch_size=data_group_size,
                               seq_len_ub=seq_len_ub,
                               iter_num=args.generate_step,
                               src_num=args.num_src,
                               infect_rate=args.infect_rate,
                               # NOTE(review): '_recover_rate' (leading underscore) looks
                               # like a typo for 'recover_rate' — confirm against config.py.
                               recover_rate=args._recover_rate,
                               lb=lb, ub=ub,
                               type_str=f"{dataset}-{args.generate_step}-{args.num_src}-{args.infect_rate}-{(lb, ub)}-SIRlabels",
                               update_cache=args.update_cache)

elif args.generate_model == "IC":
    label_dataset = ICDataSet(graph,
                              batch_size=data_group_size,
                              seq_len_ub=seq_len_ub,
                              iter_num=args.generate_step,
                              src_num=args.num_src,
                              infect_rate=args.infect_rate,
                              lb=lb, ub=ub,
                              type_str=f"{dataset}-{args.generate_step}-{args.num_src}-{args.infect_rate}-{(lb, ub)}-IClabels",
                              update_cache=args.update_cache)
else:
    # BUGFIX: the original fell through with label_dataset == None, which only
    # surfaced later as an AttributeError on label_dataset.get_data().
    raise ValueError(f"unknown generate_model: {args.generate_model!r} (expected SI, SIR or IC)")

print("load dataset done")

# Multi-label loss: each node is independently classified as source/non-source.
loss_function = F.multilabel_soft_margin_loss

data_group = label_dataset.get_data()
group_size = len(data_group)

# Cache paths keyed by every hyper-parameter that affects the LPSI features,
# so changing any of them produces a distinct cache file.
lpsi_path = f"./cache/{dataset}-{args.generate_model}-{data_group_size}X{seq_len_ub}-{args.num_src}-{args.infect_rate}-{(lb, ub)}-{args.lpsi_alpha}-extendedLPSI.pkl"
lpsi_cached_mat_path = f"./cache/{dataset}-{args.generate_model}-{data_group_size}X{seq_len_ub}-{args.num_src}-{args.infect_rate}-{(lb, ub)}-{args.lpsi_alpha}-CM.pkl"

# --- LPSI feature generation -------------------------------------------------
# For every snapshot, compute extended-LPSI node scores that serve as the
# per-node input features of the downstream model.  Features are cached on
# disk; `--update_cache` forces regeneration and overwrites the cache.
lpsi_train_tic = timing.tick()
input_np_group = [np.zeros((legal_data['data'].shape[0], args.num_features, legal_data['data'].shape[1])) for legal_data
                  in data_group]
if osp.exists(lpsi_path) and not args.update_cache:
    with open(lpsi_path, 'rb') as fin:
        input_np_group = pickle.load(fin)
else:
    coo_adj = graph.get_adjacency_coo()
    s_lpsi = None
    nl_coo_adj = graph.get_norm_laplacian_coo()
    if not args.lpsi_use_iter:
        # Closed-form LPSI needs (1 - alpha) * (I - alpha * A)^-1; the inverse
        # is expensive, so it gets its own cache file.
        s_lpsi = util.load_if_exist(lpsi_cached_mat_path)
        if s_lpsi is None:
            s_lpsi = (1 - args.lpsi_alpha) * spalg.inv(
                (sp.identity(graph.get_size()).tocoo() - args.lpsi_alpha * coo_adj).tocsc()).tocoo()
            # BUGFIX: persist only when freshly computed — the original called
            # store_to_path unconditionally, re-writing the cache file it had
            # just loaded the matrix from.
            util.store_to_path(lpsi_cached_mat_path, s_lpsi)
    for idx in range(group_size):
        cur_legal_data = data_group[idx]['data']
        cur_legal_data_len = len(cur_legal_data)
        cur_input_np = input_np_group[idx]
        for i in tqdm(range(cur_legal_data_len), desc="Generating LPSI", leave=True):
            snapshot = cur_legal_data[i]
            if args.lpsi_use_iter:
                cur_input_np[i] = LPSI.extended_LPSI_iter_process(nl_coo_adj, args.lpsi_alpha, snapshot)
            else:
                cur_input_np[i] = LPSI.extended_LPSI_cached_process(s_lpsi, snapshot)

    # BUGFIX: dump unconditionally after regeneration.  The original guarded
    # the dump with `not osp.exists(lpsi_path)`, so `--update_cache` recomputed
    # the features but left the stale cache file on disk.
    with open(lpsi_path, 'wb') as fout:
        pickle.dump(input_np_group, fout)
lpsi_train_time = timing.tock(lpsi_train_tic)
# Attach the features to each group; `feat_np` renamed from `input`, which
# shadowed the builtin.  Shape is presumably (num_snapshots, num_features, N)
# — TODO confirm against the dataset classes.
for idx, feat_np in enumerate(input_np_group):
    data_group[idx]['input'] = feat_np
print("generate input done")

# ---- train / eval split -----------------------------------------------------
# Shuffle group indices, then carve off the first `eval_len` for evaluation.
shuffled_idx = np.arange(group_size)
random.shuffle(shuffled_idx)
eval_len = max(1, int(args.eval_data_ratio * group_size))
train_len = group_size - eval_len
# format {"id":int,"gt":np.ndarray,"data":np.ndarray}
eval_dataset = [data_group[i] for i in shuffled_idx[:eval_len]]
train_dataset = [data_group[i] for i in shuffled_idx[eval_len:]]
print("data division done")

# ---- optional standalone LPSI baseline evaluation ---------------------------
# Disabled by default; metrics stay zero unless the flag below is flipped.
eval_lpsi = False
lpsi_eval_tic = timing.tick()
lpsi_pr, lpsi_re, lpsi_F1, lpsi_aver_erd = 0, 0, 0, 0
if eval_lpsi:
    baseline_sample = eval_dataset[0]
    lpsi_score = baseline_sample['input'][0, 1]
    gt = baseline_sample['gt']
    candidate = util.get_candidate(lpsi_score, graph.get_nx_graph())
    lpsi_pr, lpsi_re, lpsi_F1, lpsi_aver_erd = util.eval_by_candidate(gt, candidate, graph.get_nx_graph())
lpsi_eval_time = timing.tock(lpsi_eval_tic)
print("lpsi evaluation done")

# --- model construction ------------------------------------------------------
model = ReverseSequenceGenerator(ClusteringMachine(graph, args.cluster_number), devs.device, cluster_train_batch=1,
                                 dropout=args.dropout,
                                 activation=F.relu)
# One Adam optimizer per sub-block so each cluster block is stepped separately.
optimizer_list = [torch.optim.Adam(sub_model.parameters(), lr=args.learning_rate) for sub_model in model.blocks]
for optimizer in optimizer_list:
    optimizer.zero_grad()
model.train()
max_gram_use = 0  # peak GPU memory observed during training, in MiB
total_train_time = 0
total_train_snapshots = 0
aver_train_time = 0

# --- training loop -----------------------------------------------------------
with tqdm(range(args.epochs)) as train_progress:
    for epoch in train_progress:
        for train_data in train_dataset:
            train_tic = timing.tick()
            cur_src_list = train_data['gt']
            # One-hot source vector over all graph nodes.
            gt_np = np.zeros(graph.get_size())
            gt_np[cur_src_list] = 1
            input_np = train_data['input'].transpose((2, 0, 1))  # (0,1,2)->(2,0,1)  (batch,4,N)->(N,batch,4)
            input_tensor = torch.from_numpy(input_np).to(torch.float32).to(devs.device)
            # Repeat the label row once per snapshot in the sequence.
            gt_np = np.repeat(gt_np.reshape(1, -1), len(train_data['data']), axis=0)  # shape??
            gt_tensor = torch.from_numpy(gt_np).to(torch.float32).to(devs.device)
            out, loss_list, sg, l2 = model.process(input_tensor, gt_tensor, loss_function, optimizer_list,
                                                   train_data['gt'])
            total_train_time += timing.tock(train_tic)
            total_train_snapshots += len(out)
            print(f"Batch BP length {len(train_data['data'])} : total loss is :", sum(loss_list), "sg is :", sum(sg),
                  "l2 is :", sum(l2))
            # BUGFIX: memory_allocated() returns BYTES; convert to MiB with a
            # RIGHT shift.  The original used `<< 20`, multiplying by 2**20.
            max_gram_use = max(max_gram_use, torch.cuda.memory_allocated() >> 20)

# Guard against an empty training set (possible when eval_data_ratio is large
# enough that every group lands in the eval split).
aver_train_time = total_train_time / total_train_snapshots if total_train_snapshots else 0.0

# --- evaluation --------------------------------------------------------------
total_eval_time = 0
total_eval_snapshots = 0
aver_eval_time = 0
total_eval_F1, total_eval_erd, total_eval_pr, total_eval_re = 0, 0, 0, 0
aver_eval_F1, aver_eval_erd, aver_eval_pr, aver_eval_re = 0, 0, 0, 0

model.eval()
for eval_data in eval_dataset:
    eval_tic = timing.tick()
    cur_src_list = eval_data['gt']
    # One-hot source vector over all graph nodes (same encoding as training).
    gt_np = np.zeros(graph.get_size())
    gt_np[cur_src_list] = 1
    input_np = eval_data['input'].transpose((2, 0, 1))  # (0,1,2)->(2,0,1)  (batch,4,N)->(N,batch,4)
    input_tensor = torch.from_numpy(input_np).to(torch.float32).to(devs.device)
    gt_np = np.repeat(gt_np.reshape(1, -1), len(eval_data['data']), axis=0)  # shape??
    gt_tensor = torch.from_numpy(gt_np).to(torch.float32).to(devs.device)
    # train=False: forward pass only; optimizer_list is still required by the
    # signature but presumably unused here — confirm in model.process.
    out, _, _, _ = model.process(input_tensor, gt_tensor, loss_function, optimizer_list, eval_data['gt'], train=False)
    # Aggregate per-snapshot outputs into one score per node, then rank nodes
    # by descending score for source identification.
    sum_out = util.aggregate_out(out, torch.from_numpy(eval_data['data']))
    node_sort = torch.flip(sum_out.argsort(), [0])
    eval_F1, eval_erd, eval_pr, eval_re, recall_pos = util.evaluate_using_src(sum_out.cpu(), node_sort.cpu(),
                                                                              torch.from_numpy(cur_src_list).cpu(),
                                                                              graph.get_nx_graph(),
                                                                              calc_erd=args.calc_erd)
    total_eval_time += timing.tock(eval_tic)
    total_eval_snapshots += len(out)
    total_eval_F1 += eval_F1
    total_eval_erd += eval_erd
    total_eval_pr += eval_pr
    total_eval_re += eval_re
    print(f"eval result F1:{eval_F1}, ERD:{eval_erd}, recall pos:{recall_pos}")

# Metric averages are per eval GROUP; timing average is per SNAPSHOT.
aver_eval_F1 = total_eval_F1 / eval_len
aver_eval_erd = total_eval_erd / eval_len
aver_eval_re = total_eval_re / eval_len
aver_eval_pr = total_eval_pr / eval_len

aver_eval_time = total_eval_time / total_eval_snapshots

# ---- result summary: pair metric names with their values and persist --------
rst_name_list = [
    "lpsi_total", "lpsi_train", "lpsi_eval",
    "crgcn_total", "crgcn_train", "crgcn_eval",
    "gram_use",
    "lpsi_F1", "lpsi_erd", "lpsi_pr", 'lpsi_re',
    "F1", "erd", "pr", 're',
]
rst = [
    lpsi_train_time + lpsi_eval_time, lpsi_train_time, lpsi_eval_time,
    aver_eval_time + aver_train_time, aver_train_time, aver_eval_time,
    max_gram_use,
    lpsi_F1, lpsi_aver_erd, lpsi_pr, lpsi_re,
    aver_eval_F1, aver_eval_erd, aver_eval_pr, aver_eval_re
]
print("rst :", dict(zip(rst_name_list, rst)))
util.add_train_record(args, rst_name_list, rst)

# lpsi_tensor = torch.from_numpy(all_input_np.transpose((1, 0, 2))[0]).cpu()
# epoch_aver_F1, epoch_aver_erd = -2, 1e5
# ratio_sum, ratio_count = 0, 0
# src_list = torch.tensor(label_dataset.get_src_list()).to(device)
#
#
#
# evaluate_idx = list(range(min(legal_data_len, batch_size) // 2))
# eval_y = legal_data[evaluate_idx]
# eval_np = all_input_np[evaluate_idx].transpose((2, 0, 1))
# eval_input_tensor = torch.from_numpy(eval_np).to(torch.float32).to(device)
# eval_y_tensor = torch.from_numpy(eval_y).to(device)
# equal_cnt = 0
#
# eval_step = args.eval_step
# min_eval_time = 1e9
# total_eval_time = 0
# sum_tol = 0
# max_epoch = 0
#
# lpsi_eval_tic = timing.tic()
# lpsi_out = lpsi_tensor[0]
# lpsi_node_sort = torch.flip(lpsi_out.argsort(), [0])
# lpsi_F1, lpsi_erd, _, _ = util.evaluate_using_src(lpsi_out.cpu(), lpsi_node_sort.cpu(), src_list.cpu(), graph.get_nx_graph())
# lpsi_eval_time = util.tock(lpsi_eval_tic)
#
# print("------------- lpsi eval time:", lpsi_eval_time, "-----------------------------------------------")
# crgcn_tic = timing.tic()
#
# with tqdm(range(args.epochs)) as train_progress:
#     for epoch in train_progress:
#         max_epoch = epoch
#         model.train()
#         train_progress.set_description(f"epoch {epoch}")
#         sum_F1, sum_erd = 0, 0
#         for batch_idx in range(split_num):
#             eval_y = legal_data[batch_idx * batch_size:batch_idx * batch_size + batch_size]
#             input_np = all_input_np[batch_idx * batch_size:batch_idx * batch_size + batch_size]
#             input_np = input_np.transpose((2, 0, 1))  # (0,1,2)->(2,0,1)  (batch,4,N)->(N,batch,4)
#             eval_y_tensor = torch.from_numpy(eval_y).to(torch.float32).to(device)
#             eval_input_tensor = torch.from_numpy(input_np).to(torch.float32).to(device)
#             out, loss_list, sg, l2 = model.process(eval_input_tensor, eval_y_tensor, loss_function, optimizer_list, src_list)
#             print(f"Batch BP {batch_idx + 1}/{split_num} : total loss is :", sum(loss_list), "sg is :", sum(sg), "l2 is :", sum(l2))
#         #     assert isinstance(out, torch.Tensor)
#         #     # change out
#         #     sum_out = util.aggregate_out(out, eval_y_tensor)
#         #     node_sort = torch.flip(sum_out.argsort(), [0])
#         #     eval_F1, eval_erd, ratio = util.calc_evaluation_matrix_without_LPSI(sum_out.cpu(), node_sort.cpu(), src_list.cpu(), graph.get_nx_graph(), print_detail=True)
#         #     sum_F1 += eval_F1
#         #     sum_erd += eval_erd
#         #     ratio_sum += ratio
#         #     ratio_count += 1
#         # cur_aver_F1, cur_aver_erd = sum_F1 / legal_data_len, sum_erd / legal_data_len
#         # print(f"train done. F1:{cur_aver_F1},average error distance:{cur_aver_erd},ratio:{ratio_sum / ratio_count}")
#
#         if (epoch + 1) % eval_step == 0 or epoch == args.epochs - 1:
#             update_stat = True
#
#             with torch.no_grad():
#                 model.eval()
#                 torch.cuda.empty_cache()
#                 eval_tic = timing.tic()
#                 out, _, _, _ = model.process(eval_input_tensor, eval_y_tensor, loss_function, optimizer_list, src_list, train=False)
#                 sum_out = util.aggregate_out(out, eval_y_tensor)
#                 node_sort = torch.flip(sum_out.argsort(), [0])
#                 # eval_F1, eval_erd = util.evaluate(sum_out.cpu(), node_sort.cpu(), src_list.cpu(), ratio_sum / ratio_count, graph.get_nx_graph())
#                 eval_F1, eval_erd, ratio, recall_pos = util.evaluate_using_src(sum_out.cpu(), node_sort.cpu(), src_list.cpu(), graph.get_nx_graph())
#                 eval_time = util.tock(eval_tic)
#                 print("-------------eval time:", eval_time / out.shape[0], "-----------------------------------------------")
#                 min_eval_time = min(min_eval_time, eval_time)
#                 total_eval_time += eval_time
#                 print(f"evaluation done. F1:{eval_F1},average error distance:{eval_erd},fetching:{int(ratio * len(sum_out)) + 1}, recall_pos:{recall_pos},time:{eval_time}")
#                 if eval_F1 == epoch_aver_F1 and eval_erd == epoch_aver_erd:
#                     equal_cnt += 1
#                     if equal_cnt > 3:
#                         break
#                     continue
#
#                 if args.prior_matrix == "F1":
#                     if eval_F1 < epoch_aver_F1 and epoch > args.force_train:
#                         if sum_tol < args.acc_tol:
#                             sum_tol += 1
#                             update_stat = False
#                         else:
#                             break
#                     else:
#                         sum_tol = 0
#                 else:
#                     if eval_erd > epoch_aver_erd and epoch > args.force_train:
#                         if sum_tol < args.acc_tol:
#                             sum_tol += 1
#                             update_stat = False
#                         else:
#                             break
#                     else:
#                         sum_tol = 0
#                 equal_cnt = 0
#                 if update_stat:
#                     epoch_aver_F1, epoch_aver_erd = eval_F1, eval_erd
#     print(f"training process complete, best F1:{epoch_aver_F1},best erd:{epoch_aver_erd}")
# sec = util.tock(crgcn_tic)
#
# print(f"it takes {sec}(for CRGCN),{lpsi_time + lpsi_eval_time}(for LPSI) seconds to train and evaluate")
# util.add_train_record(args, epoch_aver_F1, epoch_aver_erd, f"eval:{min_eval_time},train:{sec - total_eval_time}", args.dataset)
#
# # util.init_csv(args, (epoch_aver_F1, epoch_aver_erd), 'karate')
# # y = legal_data[evaluate_idx]
# # evaluate_np = all_input_np[evaluate_idx].transpose((2, 0, 1))
# # input_tensor = torch.from_numpy(input_np).to(torch.float32).to(device)
# # y_tensor = torch.from_numpy(y).to(device)
# # out, _, _, _ = model.process(input_tensor, y_tensor, loss_function, optimizer_list, src_list, train=False)
# # sum_out = util.aggregate_out(out, y_tensor)
# # node_sort = torch.flip(sum_out.argsort(), [0])
# # F1, erd = util.evaluate(sum_out.cpu(), node_sort.cpu(), src_list.cpu(), ratio_sum / ratio_count, graph.get_nx_graph())
# # print(f"evaluation done. F1:{F1},average error distance:{erd}")
# # util.add_train_record(args, F1, erd, "epinions")
