# a pytorch based lisv2 code
# author: zelinzang
# email: zangzelin@gmail.com
import numpy as np
import torch
# from torch._C import device
import torch.optim as optim
import dataset_graph

import model.model_LISv2_mutilayer_emb as LISv2_m_moco
import paramENC as paramzzl
import loss_graph
import eval
import tool
import warnings
# warnings.filterwarnings('ignore')
import calsigmarho
torch.set_num_threads(1)

def train(args, Model, Loss, data, target, optimizer, epoch, device, adj):
    """Run one training epoch over ``data`` in shuffled mini-batches.

    Args:
        args: config dict; only ``args["batch_size"]`` is read here.
        Model: network mapping ``(batch_features, batch_adjacency) -> output``.
        Loss: criterion called as ``Loss(in_data, output, epoch=, index=)``;
            must return a sequence of loss tensors, element 0 is optimized.
        data: ``(N, D)`` feature tensor; batches are moved to ``device``.
        target: labels — unused here; kept so ``train``/``Test`` share a signature.
        optimizer: torch optimizer over ``Model``'s parameters.
        epoch: current epoch index (controls the every-100-epoch logging).
        device: torch device the batch tensors are moved to.
        adj: ``(N, N)`` adjacency; the batch sub-matrix is fed to ``Model``.

    Returns:
        List of 7 floats; only index 0 (summed batch loss) is populated.
    """
    batch_size = args["batch_size"]

    Model.train()

    num_train_sample = data.shape[0]
    # Integer ceil(N / batch_size) — avoids the float-tensor batch indices
    # the previous ``(n - 0.5) // B + 1`` formulation produced.
    num_batch = (num_train_sample + batch_size - 1) // batch_size
    rand_index_i = torch.randperm(num_train_sample).long()
    train_loss_sum = [0, 0, 0, 0, 0, 0, 0]

    for batch_idx in range(num_batch):
        # Plain Python ints are sufficient for slicing; no device transfer needed.
        start = batch_idx * batch_size
        end = min(start + batch_size, num_train_sample)
        sample_index_i = rand_index_i[start:end]

        optimizer.zero_grad()
        in_data = data[sample_index_i].to(device).float()
        # Restrict the adjacency to the sampled rows AND columns.
        output = Model(in_data, adj[sample_index_i][:, sample_index_i])

        loss_list = Loss(in_data, output, epoch=epoch, index=sample_index_i)
        loss_list[0].backward()
        train_loss_sum[0] += loss_list[0].item()

        if epoch % 100 == 0:
            print("batch {} loss {}".format(batch_idx, loss_list[0].item()))

        optimizer.step()

    if epoch % 100 == 0:
        print(
            "Train Epoch: {} [{}/{} ({:.0f}%)] \t Loss: {}".format(
                epoch,
                batch_idx * batch_size,
                num_train_sample,
                batch_size * 100.0 * batch_idx / num_train_sample,
                train_loss_sum,
            )
        )

    return train_loss_sum


def Test(args, Model, data, target, optimizer, epoch, device, adj):
    """Embed the whole dataset in deterministic-order mini-batches.

    Args:
        args: config dict; only ``args["batch_size"]`` is read here.
        Model: network mapping ``(batch_features, batch_adjacency) -> embedding``.
        data: ``(N, D)`` feature tensor; batches are moved to ``device``.
        target: labels — unused; kept so ``train``/``Test`` share a signature.
        optimizer: unused; kept for signature compatibility.
        epoch: unused; kept for signature compatibility.
        device: torch device the batch tensors are moved to.
        adj: ``(N, N)`` adjacency; the batch sub-matrix is fed to ``Model``.

    Returns:
        ``(N, emb_dim)`` numpy array of embeddings for all samples, in input order.
    """
    Model.eval()
    batch_size = args["batch_size"]
    num_train_sample = data.shape[0]
    # Integer ceil(N / batch_size) — replaces the float-based batch count.
    num_batch = (num_train_sample + batch_size - 1) // batch_size
    index_all = torch.arange(num_train_sample)

    # Collect per-batch embeddings and concatenate once at the end:
    # repeated np.concatenate inside the loop was accidentally O(N^2).
    chunks = []
    for batch_idx in range(num_batch):
        start = batch_idx * batch_size
        end = min(start + batch_size, num_train_sample)
        sample_index_i = index_all[start:end]

        datab = data.float()[sample_index_i].to(device)
        em = Model(datab, adj[sample_index_i][:, sample_index_i])
        chunks.append(em.detach().cpu().numpy())

    return np.concatenate(chunks, axis=0)


def main(
    args, pool=None,
):
    """Train LISv2 on the dataset named in ``args`` and periodically evaluate.

    Args:
        args: experiment configuration dict (``data_name``, ``seed``, ``lr``,
            ``epochs``, ``log_interval``, ``dropedgerate``, ...).
        pool: unused; kept for interface compatibility with callers.

    Returns:
        The experiment output path created by ``tool.GetPath``.
    """
    path = tool.GetPath(args["data_name"] + "_" + args["name"])
    tool.SaveParam(path, args)
    tool.SetSeed(args["seed"])

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("use device:", device)
    data_train, label_train, DisE, DisG, adj = dataset_graph.GetData(
        data_name=args["data_name"], device=device,
    )
    adj = adj.to(device)
    # Precompute the per-sample bandwidth terms (P/rho) for the graph and
    # Euclidean distance matrices consumed by the loss below.
    PG, PE, rhoG, rhoE = calsigmarho.CalSigmaRho(
        DisG=DisG,
        DisE=DisE,
        enlarge=args["enlarge"],
        v=100,
        perplexity=args["perplexity"],
        dataset=args['data_name'],
        ndata=args['data_trai_n'],
    )

    Model = LISv2_m_moco.LISV2_MLP(
        input_dim=data_train.shape[1],
        networkStructure=[1000, 500, 250, args["embdim"]],
        NNmethod=args['NNmethod'],
    ).to(device)
    Loss = loss_graph.Criterion(
        data=data_train,
        DisE=DisE,
        DisG=DisG,
        perplexity=args["perplexity"],
        r_e=args["r_e"],
        r_p=args["r_p"],
        enlarge=args["enlarge"],
        v=args["v"],
        PE=PE,
        PG=PG,
        rhoE=rhoE,
        rhoG=rhoG,
    )

    optimizer = optim.Adam(Model.parameters(), lr=args["lr"])
    # Kept although plotting is currently disabled; GIFPloter construction
    # may set up output state used elsewhere — TODO confirm before removing.
    gifPloterLatentTrain = tool.GIFPloter()

    for epoch in range(0, args["epochs"] + 1):
        # Resample (drop) edges every epoch for regularization.
        adj_r = dataset_graph.EdgeSampler(adj, args["dropedgerate"], tdevice=device)
        loss_item = train(
            args,
            Model=Model,
            Loss=Loss,
            data=data_train,
            target=label_train,
            optimizer=optimizer,
            epoch=epoch,
            device=device,
            adj=adj_r,
        )

        if epoch > 0 and epoch % args["log_interval"] == 0:
            # Evaluate on the full adjacency (drop rate 0.0).
            adj_r = dataset_graph.EdgeSampler(adj, 0.0, tdevice=device)
            em_train = Test(
                args, Model, data_train, label_train, optimizer, epoch, device, adj_r
            )
            (
                cl_acc,
                nmi,
                f1_macro,
                precision_macro,
                adjscore,
            ) = eval.TestClassifacationKMeans(
                em_train, label_train.detach().cpu().numpy()
            )

            title = "train_epoch_em{}.png".format(epoch)
            np.save('baseline/our' + 'pumbfeature.npy', em_train)
            # Fix: np.save cannot serialize a (possibly CUDA) torch tensor
            # directly — convert labels to a CPU numpy array first, as done
            # for the evaluation call above.
            np.save('baseline/our' + 'pumblabel.npy',
                    label_train.detach().cpu().numpy())
            # Append the accuracy log; ``with`` guarantees the handle closes.
            with open("acclogpath_{}.txt".format(args["data_name"]), "a") as logf:
                print(path + title + 'acc,' + str(cl_acc), file=logf)

    return path


if __name__ == "__main__":
    # Other datasets, kept for reference:
    #   path = main(paramzzl.GetParamCora())
    #   path = main(paramzzl.GetParamCiteseer())

    # Run the Pubmed experiment.
    path = main(paramzzl.GetParamPubmed())
