import time
from math import log

import torch
import torch.nn.functional as F
import torch.optim as optim

from utils import accuracy
from utils import get_idx


def train_epoch(model, optimizer, epoch, features, adj, labels, idx_train, idx_val, logs):
    """Run one training step and report train/validation metrics.

    Performs a single forward/backward pass on the training indices, then
    computes validation loss/accuracy from the *same* forward output — i.e.
    against the pre-update parameters, with training-mode layers (e.g.
    dropout) still active. Metrics are printed and, when ``logs`` is given,
    appended to the 'train_log' file.

    Args:
        model: module called as ``model(features, adj)``; expected to return
            log-probabilities (fed to ``F.nll_loss``).
        optimizer: torch optimizer over ``model.parameters()``.
        epoch: zero-based epoch index (reported as ``epoch + 1``).
        features, adj: model inputs (features also used for row indexing).
        labels: ground-truth class index tensor.
        idx_train, idx_val: index collections selecting the train/val rows.
        logs: dict with a file-like ``'train_log'`` entry, or None to skip
            file logging.
    """
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()
    # Validation metrics are report-only; no_grad avoids building a second
    # autograd graph over the val slice (values are unchanged).
    with torch.no_grad():
        loss_val = F.nll_loss(output[idx_val], labels[idx_val])
        acc_val = accuracy(output[idx_val], labels[idx_val])
    time_spend = time.time() - t
    print('epoch: {:04d}'.format(epoch+1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}'.format(time_spend))
    if logs is not None:
        logs['train_log'].write('{:04d} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}\n'.format(
            epoch + 1,
            loss_train.item(),
            acc_train.item(),
            loss_val.item(),
            acc_val.item(),
            time_spend))

def test(model, features, adj, labels, idx_test, logs, epoch='final'):
    """Evaluate the model on the test indices and report loss/accuracy.

    Args:
        model: module called as ``model(features, adj)``; expected to return
            log-probabilities (fed to ``F.nll_loss``).
        features, adj: model inputs.
        labels: ground-truth class index tensor.
        idx_test: index collection selecting the test rows.
        logs: dict with a file-like ``'test_log'`` entry, or None to skip
            file logging.
        epoch: label for the report line (an int during training, or the
            default 'final' for the post-training evaluation).
    """
    model.eval()
    # Pure inference: disable gradient tracking to save memory and compute.
    with torch.no_grad():
        output = model(features, adj)
        loss_test = F.nll_loss(output[idx_test], labels[idx_test])
        acc_test = accuracy(output[idx_test], labels[idx_test])
    print('test results: epoch={} loss= {:.4f} accuracy= {:.4f}'.format(epoch, loss_test.item(), acc_test.item()))
    if logs is not None:
        logs['test_log'].write('epoch={} loss={:.4f} accuracy={:.4f}\n'.format(epoch, loss_test.item(), acc_test.item()))


def train(model, features, adj, labels,
          total_epoch, train_size, val_size, test_size, lr, weight_decay, test_every,
          logs):
    """Full training driver: Adam optimization with periodic and final testing.

    Builds the optimizer and the train/val/test index split, runs
    ``total_epoch`` calls to ``train_epoch``, evaluates on the test split
    every ``test_every`` epochs, and performs one final test afterwards.

    Args:
        model: module called as ``model(features, adj)``.
        features, adj, labels: dataset tensors passed through to the
            per-epoch functions; ``features.shape[0]`` is the node count
            used to build the index split.
        total_epoch: number of training epochs.
        train_size, val_size, test_size: split sizes forwarded to get_idx.
        lr, weight_decay: Adam hyperparameters.
        test_every: run a test pass every this many epochs; a value <= 0
            disables periodic testing (the original code raised
            ZeroDivisionError for 0).
        logs: dict with file-like 'train_log'/'test_log' entries, or None.
    """
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    idx_train, idx_val, idx_test = get_idx(features.shape[0], train_size, val_size, test_size)
    if logs is not None:
        # Column header matching the per-epoch lines written by train_epoch.
        logs['train_log'].write('epoch loss_train acc_train loss_val acc_val time\n')
    t_total = time.time()
    for epoch in range(total_epoch):
        train_epoch(model=model,
                    optimizer=optimizer,
                    epoch=epoch,
                    features=features,
                    adj=adj,
                    labels=labels,
                    idx_train=idx_train,
                    idx_val=idx_val,
                    logs=logs)
        # Guard test_every > 0: a non-positive value now means "no periodic
        # testing" instead of crashing with ZeroDivisionError.
        if test_every > 0 and (epoch + 1) % test_every == 0:
            test(epoch=epoch + 1,
                 model=model,
                 features=features,
                 adj=adj,
                 labels=labels,
                 idx_test=idx_test,
                 logs=logs)
    total_spend = time.time() - t_total
    print()
    print("total time: {:.4f}s".format(total_spend))
    test(model=model,
         features=features,
         adj=adj,
         labels=labels,
         idx_test=idx_test,
         logs=logs)
    print()
