import random
import time

import numpy as np
import torch
from torch import optim
import torch.nn.functional as F

from load import load_data
from model import GPNEncoder, GPNValuator
from utils import task_generator, euclidean_dist, accuracy, f1

hidden = 16
dropout = 0.5
lr = 0.005
weight_decay = 5e-4

use_cuda = torch.cuda.is_available()


def inner_process(ft, adjacent, deg, y, encoder, scorer, class_selected, id_support, id_query, n_way, k_shot):
    """Shared forward pass for one episode.

    Encodes all nodes, builds importance-weighted class prototypes from the
    support set, and scores the query nodes against those prototypes.
    Returns (log_probs over the n_way classes for each query node,
    episode-local ground-truth labels).
    """
    node_embeddings = encoder(ft, adjacent)
    importance = scorer(ft, adjacent)
    emb_dim = node_embeddings.size()[1]

    # Gather this episode's support and query embeddings.
    support = node_embeddings[id_support].view([n_way, k_shot, emb_dim])
    query = node_embeddings[id_query]

    # Per-node importance: sigmoid(log(degree) * score), normalized so the
    # weights of each class's k_shot support nodes sum to 1.
    log_degrees = torch.log(deg[id_support].view([n_way, k_shot]))
    raw_scores = importance[id_support].view([n_way, k_shot])
    weights = torch.sigmoid(log_degrees * raw_scores).unsqueeze(-1)
    weights = weights / torch.sum(weights, dim=1, keepdim=True)

    # Class prototypes are the weighted sums of support embeddings.
    prototypes = (support * weights).sum(1)

    # Closer prototype => higher log-probability.
    dists = euclidean_dist(query, prototypes)
    log_probs = F.log_softmax(-dists, dim=1)

    # Remap original class ids to episode-local indices in [0, n_way).
    episode_labels = torch.LongTensor([class_selected.index(cls) for cls in y[id_query]])
    if use_cuda:
        episode_labels = episode_labels.cuda()

    return log_probs, episode_labels


def train_epoch(ft, adjacent, deg, y, optimizer_encoder, optimizer_scorer, encoder, scorer, class_selected, id_support,
                id_query, n_way, k_shot):
    """Run one meta-training episode: forward pass, NLL loss, one step of
    both optimizers.

    Returns (acc_train, f1_train) computed on the episode's query set.
    """
    encoder.train()
    scorer.train()
    optimizer_encoder.zero_grad()
    optimizer_scorer.zero_grad()

    output, labels_new = inner_process(ft, adjacent, deg, y, encoder, scorer, class_selected, id_support, id_query,
                                       n_way, k_shot)

    loss_train = F.nll_loss(output, labels_new)

    loss_train.backward()
    optimizer_encoder.step()
    optimizer_scorer.step()

    # Detach and move to CPU unconditionally before computing metrics.
    # The previous code only did this on CUDA, so on a CPU-only run
    # `output` still required grad and metric helpers converting to
    # numpy would fail; detach()/cpu() are no-ops otherwise.
    output = output.detach().cpu()
    labels_new = labels_new.detach().cpu()
    acc_train = accuracy(output, labels_new)
    f1_train = f1(output, labels_new)

    return acc_train, f1_train


def test_epoch(ft, adjacent, deg, y, encoder, scorer, class_selected, id_support, id_query, n_way, k_shot):
    """Evaluate one episode with both networks in eval mode.

    Returns (acc_test, f1_test) computed on the episode's query set.
    """
    encoder.eval()
    scorer.eval()

    # No gradients are needed at evaluation time; skipping autograd graph
    # construction saves memory and time (the previous code built the
    # graph and only detached on CUDA).
    with torch.no_grad():
        output, labels_new = inner_process(ft, adjacent, deg, y, encoder, scorer, class_selected, id_support,
                                           id_query, n_way, k_shot)

    # Move to CPU unconditionally so the metric helpers always receive
    # grad-free CPU tensors (no-op on a CPU-only run).
    output = output.cpu()
    labels_new = labels_new.cpu()
    acc_test = accuracy(output, labels_new)
    f1_test = f1(output, labels_new)

    return acc_test, f1_test


def log_loss(ft, adjacent, deg, y, encoder, scorer, meta_num, pool, n_way, k_shot, label):
    """Evaluate on the first meta_num pre-sampled tasks from `pool` and
    print the mean accuracy and mean F1, tagged with `label`.

    Note: despite the name, this reports accuracy/F1, not a loss value.
    """
    accs = []
    f1s = []
    for id_support, id_query, class_selected in pool[:meta_num]:
        acc, f1_value = test_epoch(ft, adjacent, deg, y, encoder, scorer, class_selected, id_support, id_query,
                                   n_way, k_shot)
        accs.append(acc)
        f1s.append(f1_value)
    print("Meta-{}_Accuracy: {}, Meta-{}_F1: {}".format(label, np.array(accs).mean(axis=0), label,
                                                        np.array(f1s).mean(axis=0)))


def train(ft, adjacent, y, deg, n_way, k_shot, n_query, meta_test_num, meta_valid_num, episodes):
    """Meta-train the GPN encoder/valuator for `episodes` episodes.

    Args:
        ft: node feature matrix (rows = nodes, cols = features).
        adjacent: graph adjacency structure passed to the encoder/scorer.
        y: per-node class labels.
        deg: per-node degree values (used for importance weighting).
        n_way: number of classes sampled per episode.
        k_shot: support nodes per class.
        n_query: query nodes per class.
        meta_test_num / meta_valid_num: number of pre-sampled test/valid tasks.
        episodes: total number of training episodes.

    NOTE(review): this function reads the module-level globals
    `id_by_class`, `class_list_train`, `class_list_valid`, and
    `class_list_test`, which are only bound inside the `__main__` block —
    calling it from elsewhere without defining them first will raise
    NameError.
    """
    # Fix RNG seeds for reproducible task sampling and initialization.
    random.seed(666)
    torch.manual_seed(666)
    if use_cuda:
        torch.cuda.manual_seed(666)

    encoder = GPNEncoder(n_feat=ft.shape[1], n_hid=hidden, dropout=dropout)
    scorer = GPNValuator(n_feat=ft.shape[1], n_hid=hidden, dropout=dropout)
    optimizer_encoder = optim.Adam(encoder.parameters(), lr=lr, weight_decay=weight_decay)
    optimizer_scorer = optim.Adam(scorer.parameters(), lr=lr, weight_decay=weight_decay)

    # Move models and all episode-invariant data to the GPU once, up front.
    if use_cuda:
        encoder.cuda()
        scorer.cuda()
        ft = ft.cuda()
        adjacent = adjacent.cuda()
        y = y.cuda()
        deg = deg.cuda()

    # Pre-sample fixed validation/test task pools so every evaluation
    # measures on the same tasks.
    valid_pool = [task_generator(id_by_class, class_list_valid, n_way, k_shot, n_query) for _ in range(meta_valid_num)]
    test_pool = [task_generator(id_by_class, class_list_test, n_way, k_shot, n_query) for _ in range(meta_test_num)]

    t_total = time.time()
    meta_train_acc = []

    for episode in range(episodes):
        # A fresh training task is sampled each episode.
        id_support, id_query, class_selected = \
            task_generator(id_by_class, class_list_train, n_way, k_shot, n_query)
        acc_train, f1_train = train_epoch(ft, adjacent, deg, y, optimizer_encoder, optimizer_scorer, encoder, scorer,
                                          class_selected, id_support, id_query, n_way, k_shot)
        meta_train_acc.append(acc_train)
        # Report running mean train accuracy and evaluate every 10 episodes.
        if episode > 0 and episode % 10 == 0:
            print("-------Episode {}-------".format(episode))
            print("Meta-Train_Accuracy: {}".format(np.array(meta_train_acc).mean(axis=0)))

            # validation
            log_loss(ft, adjacent, deg, y, encoder, scorer, meta_valid_num, valid_pool, n_way, k_shot, 'valid')
            # testing
            log_loss(ft, adjacent, deg, y, encoder, scorer, meta_test_num, test_pool, n_way, k_shot, 'test')

    print("Total time elapsed: {:.4f}s".format(time.time() - t_total))


if __name__ == '__main__':
    # Load the Amazon_clothing split; the class lists and id_by_class
    # become module globals that train() reads.
    adj, features, labels, degree, class_list_train, class_list_valid, class_list_test, id_by_class = load_data(
        'Amazon_clothing')
    # 5-way 5-shot, 20 query nodes per class, 50 valid/test tasks, 1000 episodes.
    train(features, adj, labels, degree, n_way=5, k_shot=5, n_query=20,
          meta_test_num=50, meta_valid_num=50, episodes=1000)
