import numpy as np
import time
import torch
import matplotlib.pyplot as plt
from tensorboardX import FileWriter, summary
from losses_bigbatch import *
import torchvision.transforms as transforms

def evaluate(model, criterion, val_iterator, n_batches):
    """Return the per-sample average loss and accuracy over the validation set.

    Batch results are weighted by batch size before averaging, so an uneven
    final batch does not skew the result (per-sample average, not a plain
    mean of batch means).

    Args:
        model: callable mapping an input batch to class logits.
        criterion: loss function taking (logits, targets).
        val_iterator: iterable yielding (x_batch, y_batch) pairs.
        n_batches: batch budget; the loop breaks once j >= n_batches, which
            (as in the original code) evaluates up to n_batches + 1 batches —
            preserved for compatibility with existing callers.

    Returns:
        (avg_loss, avg_accuracy) per data point.
    """
    # Run on GPU when available, otherwise fall back to CPU so the function
    # also works on CPU-only machines (the original hard-coded .cuda()).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    loss = 0.0
    acc = 0.0  # running accuracy, weighted by batch size
    total_samples = 0

    # Pure inference: disable autograd to save memory and time.
    with torch.no_grad():
        for j, (x_batch, y_batch) in enumerate(val_iterator):
            x_batch = x_batch.to(device)
            y_batch = y_batch.to(device, non_blocking=True)
            n_batch_samples = y_batch.size(0)

            logits = model(x_batch)

            # .sum() in case criterion returns a per-sample loss vector.
            batch_loss = criterion(logits, y_batch).cpu().numpy().sum()

            # Top-1 accuracy for this batch (the original converted the
            # targets numpy -> tensor -> numpy redundantly; dropped).
            pred_y = torch.max(logits, 1)[1].cpu().numpy()
            target_y = y_batch.cpu().numpy().reshape(-1)
            batch_accuracy = float((pred_y == target_y).sum()) / float(target_y.size)

            loss += batch_loss * n_batch_samples
            acc += batch_accuracy * n_batch_samples
            total_samples += n_batch_samples

            if j >= n_batches:
                break

    return loss / total_samples, acc / total_samples


def eval(model, vgg16, style, val_iterator, n_validation_batch, nums_style, num_content = 16):
    """Compute the per-sample average validation loss for the style-transfer net.

    No gradients are propagated here (the original docstring claimed
    otherwise; it was copy-pasted from optimization_step). Also NOTE(review):
    unlike optimization_step, the style losses are NOT scaled by 7.0 here —
    confirm whether that asymmetry is intentional.

    Args:
        model: style-transfer network (eval mode expected; caller's job).
        vgg16: frozen feature extractor used by the loss functions.
        style: style target tensor (loop-invariant).
        val_iterator: iterable yielding (content_batch, _) pairs.
        n_validation_batch: batch budget; breaks once j >= n_validation_batch
            (evaluates up to n_validation_batch + 1 batches, as originally).
        nums_style: layer spec forwarded to compute_style_loss.
        num_content: layer spec forwarded to compute_perceptual_loss.

    Returns:
        (avg_loss, outputs) where outputs holds up to the first 8 stylized
        batches as uint8 numpy arrays scaled to [0, 255] for plotting.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    loss_mean = 0.0
    total_samples = 0
    outputs = []

    # Loop-invariant: move the style target to the device once, not per batch.
    ys = style.to(device)

    # Validation only: disable autograd to save memory.
    with torch.no_grad():
        for j, (x_batch, _) in enumerate(val_iterator):
            yc = x_batch.to(device)
            n_batch_samples = yc.size(0)
            y = model(yc)

            # Keep the first 8 stylized batches for visualization.
            if j < 8:
                outputs.append((y.cpu().numpy() * 255.0).astype("uint8"))

            loss = compute_perceptual_loss(vgg16, y, yc, num_content)
            for style_loss in compute_style_loss(vgg16, y, ys, nums_style):
                loss = loss + style_loss

            loss_mean += loss.cpu().numpy().sum() * n_batch_samples
            total_samples += n_batch_samples

            if j >= n_validation_batch:
                break

    return loss_mean / total_samples, outputs

def optimization_step(model, vgg16, optimizer, content, style, nums_style, num_content = 16):
    """Run one training step: forward pass, total loss, backward, optimizer update.

    Total loss = perceptual (content) loss + 7.0 * each style loss. (The
    original docstring claimed an accuracy was computed; it is not.)

    Args:
        model: style-transfer network being trained.
        vgg16: frozen feature extractor used by the loss functions.
        optimizer: optimizer over model's parameters.
        content: batch of content images.
        style: style target tensor.
        nums_style: layer spec forwarded to compute_style_loss.
        num_content: layer spec forwarded to compute_perceptual_loss.

    Returns:
        The scalar batch loss (numpy scalar) before the optimizer step.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    yc = content.to(device)
    ys = style.to(device)

    y = model(yc)

    loss = compute_perceptual_loss(vgg16, y, yc, num_content)
    # Style term is up-weighted relative to the content term.
    for style_loss in compute_style_loss(vgg16, y, ys, nums_style):
        loss = loss + style_loss * 7.0

    # Record the scalar before backprop (detach so no graph is kept alive).
    batch_loss = loss.detach().cpu().numpy().sum()

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    return batch_loss


def plot_reconstruction(samples, nex=8, zm=2):
    """Show up to `nex` reconstructed images in a single row.

    Each element of `samples` is assumed to be a (1, C, H, W) or (C, H, W)
    uint8 array as produced by eval() — TODO confirm against callers.

    Fixes over the original:
      * no longer mutates the caller's `samples` list in place;
      * handles len(samples) < nex instead of raising IndexError;
      * handles nex == 1 (plt.subplots returns a bare Axes, not an array);
      * no-op on an empty `samples` list.

    Args:
        samples: list of image arrays in channel-first layout.
        nex: maximum number of images to show.
        zm: zoom factor — inches per image in the figure size.
    """
    n = min(nex, len(samples))
    if n == 0:
        return
    fig, axs = plt.subplots(ncols=n, nrows=1, figsize=(zm * n, zm))
    axs = np.atleast_1d(axs)  # ncols == 1 yields a bare Axes object
    for axi in range(n):
        # (C, H, W) -> (H, W, C) on a view; caller's array is left untouched.
        img = np.transpose(np.squeeze(samples[axi]), (1, 2, 0))
        axs[axi].imshow(img)
        axs[axi].axis('off')
    plt.show()

def train(target_style_tensor, styleTransferNet, vgg16, optimizer, 
          train_iterator, n_epochs, n_batches, val_iterator, n_validation_batch, saving_epoch, nums_style, num_content = 16):
    """Train the style-transfer network, validating and logging once per epoch.

    Per epoch: runs optimization_step over train_iterator, switches the model
    to eval mode, computes the validation loss via eval(), plots sample
    reconstructions, writes train/val loss scalars to tensorboardX, and saves
    a checkpoint to ./output/Model every `saving_epoch` epochs.

    Args:
        target_style_tensor: style image tensor forwarded to the losses.
        styleTransferNet: network being trained.
        vgg16: frozen feature extractor for perceptual/style losses.
        optimizer: optimizer over styleTransferNet's parameters.
        train_iterator: iterable of (content_batch, _) training pairs.
        n_epochs: number of epochs to run.
        n_batches: batches per epoch; used for loss averaging and the
            TensorBoard step axis.
        val_iterator: iterable of validation (content_batch, _) pairs.
        n_validation_batch: validation batch budget forwarded to eval().
        saving_epoch: checkpoint interval in epochs; None disables saving.
        nums_style: layer spec forwarded to the style loss.
        num_content: layer spec forwarded to the perceptual loss.
    """
    # NOTE: currently only supports a training batch size of 1;
    # support for batch sizes > 1 may be added later.
    writer = FileWriter('./output/Log/vgg16')
    start = time.time()
    styleTransferNet.train()
    style = target_style_tensor
    running_loss = 0.0

    for epoch in range(0, n_epochs):
        # `step` continues across epochs (starts at 1 + epoch*n_batches) so
        # the TensorBoard x-axis stays monotonic; its last value is reused in
        # the logging below after the loop ends.
        for step, (x_batch, _) in enumerate(train_iterator, 1 + epoch*n_batches):

            batch_loss = optimization_step(
                styleTransferNet, vgg16, optimizer, x_batch, style, nums_style, num_content
            )
            running_loss += batch_loss
            

        # Validate in eval mode; switched back to train mode further down.
        styleTransferNet.eval()
        test_loss, outputs= eval(
            styleTransferNet, vgg16, style, val_iterator, n_validation_batch, nums_style, num_content
        )
        end = time.time()
        
        print('validation_step: {0:.2f}  avg_train_loss: {1:.3f}  avg_val_loss: {2:.3f}  time_per_val_step: {3:.3f}'.format(
            step/n_batches, running_loss/n_batches, test_loss,
            (end - start)
        ))
        plot_reconstruction(outputs)
        # all_losses += [(
        #     step/n_batches,
        #     running_loss/validation_step, 
        #     test_loss,
        #     running_accuracy/validation_step, 
        #     test_accuracy
        # )]
        

        # Log per-epoch averaged train/val loss; the global_step passed to
        # add_summary is step/n_batches (a float — epoch-like index).
        summary_train_loss = summary.scalar("train_loss", running_loss/n_batches)
        summary_val_loss = summary.scalar("val_loss", test_loss)
        writer.add_summary(summary_train_loss, step/n_batches)
        writer.add_summary(summary_val_loss, step/n_batches)   

        # Reset per-epoch accumulators and resume training mode.
        running_loss = 0.0
        start = time.time()
        styleTransferNet.train()

        if saving_epoch is not None and epoch % saving_epoch == 0:
            print('saving model for epoch {}.'.format(epoch))
            torch.save(
            styleTransferNet.state_dict(),
            '%s/vgg16_epoch_%d.pth' % ("./output/Model", epoch))
    writer.close()
    