import torch as t
from torch.utils.tensorboard import SummaryWriter
from utils.log_utils import _init_logs_
# NOTE(review): this SummaryWriter is created but never written to anywhere in
# this file — confirm whether TensorBoard logging was intended in train_and_test
# or whether this (and the import above) can be removed.
writer = SummaryWriter('log1')

def train_and_test(config, model, device, criterion, optimizer, trainloader, testloader):
    """Train `model` for `config.epoch` epochs, evaluating on the test set after each.

    Args:
        config: object with an `epoch` attribute (number of epochs); forwarded to `_init_logs_`.
        model: network to train; moved onto `device` here.
        device: torch device the model, inputs and labels are moved to.
        criterion: loss function called as `criterion(outputs, labels)`.
        optimizer: optimizer over the model's parameters.
        trainloader: iterable of `(inputs, labels)` training batches.
        testloader: iterable of `(inputs, labels)` test batches (must support `len()`).

    Returns:
        Tuple `(train_best_accuracy, train_best_epoch, test_best_accuracy, test_best_epoch)`,
        accuracies in percent, epochs 1-based.

    Side effects:
        Prints running/epoch statistics, saves `model{epoch}.pth` whenever test
        accuracy improves, and calls `_init_logs_` once per epoch.
    """
    model = model.to(device)
    train_best_accuracy = 0.0
    train_best_epoch = 0
    test_best_accuracy = 0.0
    test_best_epoch = 0
    for epoch in range(config.epoch):
        # Epoch-level accumulators (reported once per epoch).
        train_epoch_loss = 0.0
        train_epoch_correct = 0
        train_total = 0
        # Rolling accumulators for the every-100-batches progress printout.
        running_loss = 0.0
        running_correct = 0
        running_total = 0
        model.train()
        for i, (inputs, labels) in enumerate(trainloader):
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()  # clear stale gradients before this batch's backward
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            batch_loss = loss.item()
            _, predicted = t.max(outputs.data, 1)
            # .item() so accumulated counts (and derived accuracies) are plain
            # Python numbers rather than 0-dim tensors.
            batch_correct = t.sum(predicted == labels).item()
            batch_size = labels.size(0)
            train_epoch_loss += batch_loss
            train_epoch_correct += batch_correct
            train_total += batch_size
            running_loss += batch_loss
            running_correct += batch_correct
            running_total += batch_size
            if i % 100 == 99:
                print('[Epoch: %d , Batch: %5d], Loss: %.3f, Accuracy: %.3f'%(epoch+1, i+1, running_loss/100, running_correct / running_total * 100))
                running_loss = 0.0
                running_correct = 0
                running_total = 0
        train_epoch_accuracy = 100 * train_epoch_correct / train_total
        if train_epoch_accuracy > train_best_accuracy:
            train_best_accuracy = train_epoch_accuracy
            train_best_epoch = epoch + 1

        with t.no_grad():
            test_loss = 0.0
            test_total = 0
            test_correct = 0
            model.eval()
            for inputs, labels in testloader:
                inputs = inputs.to(device)
                labels = labels.to(device)
                outputs = model(inputs)
                _, predicted = t.max(outputs.data, 1)
                test_total += labels.size(0)
                test_correct += t.sum(predicted == labels).item()
                test_loss += criterion(outputs, labels).item()
            test_epoch_accuracy = 100 * test_correct / test_total
            # BUG FIX: the original divided test_loss by len(labels) — the size of
            # the *last* test batch — not the number of batches. Mean per-batch
            # loss divides by len(testloader).
            print("Epoch: %d, Test Accuracy: %.3f , Test Loss: %.3f "%(epoch+1, test_epoch_accuracy, test_loss/len(testloader)))
            if test_epoch_accuracy > test_best_accuracy:
                test_best_accuracy = test_epoch_accuracy
                test_best_epoch = epoch + 1
                t.save(model.state_dict(), f'model{test_best_epoch}.pth')
        _init_logs_(config, train_loss=train_epoch_loss, train_accuracy=train_epoch_accuracy, test_loss=test_loss,
                        test_accuracy=test_epoch_accuracy, epoch=epoch + 1)
    print("Train best Accuracy: %.3f, in epoch %d"%(train_best_accuracy, train_best_epoch))
    print('Test best accuracy: %.3f, in epoch %d'%(test_best_accuracy, test_best_epoch))

    return train_best_accuracy, train_best_epoch, test_best_accuracy, test_best_epoch

def test_best_model():
    """Placeholder: load and evaluate the best saved checkpoint (not yet implemented)."""
    pass




