import numpy as np
import time
from numpy.core.fromnumeric import shape
import torch
import matplotlib.pyplot as plt
from tensorboardX import FileWriter, summary
from losses_bigbatch import *
import torchvision.transforms as transforms
# RGB color palette used to visualize segmentation class ids.
# COLOR_CONTAINER[class_id] gives the display color for that class
# (see plot_reconstruction below), so the order here is significant.
WHITE = (255, 255, 255)
PEACHPUFF = (255, 218, 185) 
TURQUOISE4 = (0, 134, 139)
BLACK = (0, 0, 0)
DARKSEAGREEN1 = (193, 255, 193)
GREY = (190, 190, 190)
SEAGREEN4 = (46, 139, 87)
NAVYBLUE = (0, 0, 128)
KHAKI2 = (238, 230, 133)
YELLOW = (255, 255, 0)
COLOR_CONTAINER = [WHITE, PEACHPUFF, TURQUOISE4, BLACK, DARKSEAGREEN1, GREY,
                   SEAGREEN4, NAVYBLUE, KHAKI2, YELLOW]

def evaluate(model, criterion, val_iterator, n_batches):
    """Return the per-sample average loss and accuracy over the validation set.

    Averages are weighted by batch size, so the result is a per-data-point
    mean rather than a per-batch mean.

    Args:
        model: network mapping a CUDA input batch to class logits.
        criterion: loss whose output, summed over the batch, is treated as
            the total batch loss (e.g. reduction='none' cross-entropy).
        val_iterator: iterable yielding (x_batch, y_batch) CPU tensors.
        n_batches: number of batches to evaluate before stopping.

    Returns:
        (mean_loss, mean_accuracy) as Python floats.
    """
    loss = 0.0
    acc = 0.0  # running sample-weighted accuracy
    total_samples = 0

    for j, (x_batch, y_batch) in enumerate(val_iterator):

        # Inference only: no gradients needed for the inputs.
        x_batch = x_batch.requires_grad_(False).cuda()
        y_batch = y_batch.requires_grad_(False).cuda(non_blocking=True)
        n_batch_samples = y_batch.size(0)
        logits = model(x_batch)

        # Total log-loss for this batch.
        batch_loss = criterion(logits, y_batch).cpu().data.numpy().sum()

        # Predicted class = argmax over the logits.
        pred_y = torch.max(logits, 1)[1].cpu().data.numpy()
        # Direct numpy comparison; the previous numpy->torch->numpy
        # round-trip was redundant.
        target_y = y_batch.cpu().data.numpy()
        batch_accuracy = float((pred_y == target_y).sum()) / float(target_y.size)

        # Weight by batch size so the final division yields per-sample means.
        loss += batch_loss * n_batch_samples
        acc += batch_accuracy * n_batch_samples
        total_samples += n_batch_samples

        # Stop after exactly n_batches batches (the previous `j >= n_batches`
        # check ran after processing, consuming n_batches + 1 batches).
        if j + 1 >= n_batches:
            break

    return loss / total_samples, acc / total_samples


def eval(model, val_iterator, n_validation_batch=8, n_plot_batches=8):
    """Compute the per-sample average validation loss for a segmentation model.

    Also collects the first few batches of predictions and inputs so the
    caller can visualize them (see plot_reconstruction / plot_reconstruction2).

    NOTE(review): this shadows the builtin ``eval``; the name is kept for
    backward compatibility with existing callers.

    Args:
        model: network whose forward returns a dict with key 'out' holding
            per-pixel class logits.
        val_iterator: iterable yielding (x_batch, y_batch) CPU tensors.
        n_validation_batch: number of batches to evaluate before stopping.
        n_plot_batches: how many leading batches to keep for visualization
            (was a hard-coded ``8``; default preserves old behavior).

    Returns:
        (mean_loss, outputs, data) where outputs is a list of uint8 argmax
        label maps and data a list of input arrays scaled to [0, 255].
    """
    loss_mean = 0.0
    total_samples = 0
    outputs = []  # predicted label maps for plotting
    data = []     # corresponding input images for plotting

    for j, (x_batch, y_batch) in enumerate(val_iterator):

        # Inference only: no gradients needed for the inputs.
        x_gpu = x_batch.requires_grad_(False).cuda()
        y_gpu = y_batch.requires_grad_(False).cuda()
        y_pred = model(x_gpu)['out']
        n_sample = y_pred.size(0)

        # Keep the first n_plot_batches batches for visualization.
        if j < n_plot_batches:
            _, index_ = torch.max(y_pred, dim=1)  # per-pixel argmax class
            outputs.append(index_.cpu().data.numpy().astype("uint8"))
            # assumes inputs are normalized to [0, 1] -- TODO confirm
            data.append(x_batch.data.numpy() * 255)

        loss = compute_loss(y_pred, y_gpu)
        batch_loss = loss.cpu().data.numpy().mean()

        # Weight by batch size so the final division yields a per-sample mean.
        loss_mean += batch_loss * n_sample
        total_samples += n_sample

        # Stop after exactly n_validation_batch batches (the previous
        # `j >= n_validation_batch` check ran after processing, consuming
        # one extra batch).
        if j + 1 >= n_validation_batch:
            break

    return loss_mean / total_samples, outputs, data

def optimization_step(model, optimizer, x_batch, y_batch):
    """Run one training step: forward pass, loss, backward pass, weight update.

    Args:
        model: network whose forward returns a dict with key 'out'.
        optimizer: torch optimizer wrapping the model's parameters.
        x_batch, y_batch: CPU tensors for one training batch.

    Returns:
        The batch loss summed over all elements, as a numpy scalar.
    """
    # Move the batch to the GPU; inputs themselves need no gradients.
    inputs = x_batch.requires_grad_(False).cuda()
    targets = y_batch.requires_grad_(False).cuda()

    predictions = model(inputs)['out']
    loss = compute_loss(predictions, targets)

    # Pull the scalar back to the CPU before the backward pass mutates state.
    batch_loss = loss.cpu().data.numpy().sum()

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    return batch_loss


def plot_reconstruction(samples, nex=8, zm=2):
    """Render the first ``nex`` predicted label maps as color images.

    Args:
        samples: sequence of arrays shaped (1, H, W) holding integer class
            ids that index into COLOR_CONTAINER.
        nex: number of examples to show side by side.
        zm: zoom factor (inches per example).
    """
    # Palette as an array so class ids can be mapped to RGB in one
    # vectorized fancy-indexing step instead of a per-pixel double loop.
    palette = np.asarray(COLOR_CONTAINER, dtype="uint8")
    fig, axs = plt.subplots(ncols=nex, nrows=1, figsize=(zm * nex, zm))
    for axi in range(nex):
        # Use each sample's own shape (the old code assumed every sample
        # matched samples[0]'s shape).
        labels = samples[axi][0].astype(int)
        axs[axi].imshow(palette[labels])
        axs[axi].axis('off')
    plt.show()

def plot_reconstruction2(samples, nex=8, zm=2):
    """Render the first ``nex`` input images side by side.

    Args:
        samples: sequence of arrays shaped (1, C, H, W) or (C, H, W) with
            pixel values already scaled to [0, 255].
        nex: number of examples to show.
        zm: zoom factor (inches per example).
    """
    fig, axs = plt.subplots(ncols=nex, nrows=1, figsize=(zm * nex, zm))
    for axi in range(nex):
        # (C, H, W) -> (H, W, C) via a local; the old code wrote the
        # transposed arrays back into `samples`, mutating the caller's list.
        img = np.squeeze(samples[axi]).swapaxes(0, 1).swapaxes(1, 2)
        axs[axi].imshow(img.astype("uint8"))
        axs[axi].axis('off')
    plt.show()

def train(segmentationNet, optimizer, 
          train_iterator, n_epochs, n_batches, val_iterator, n_validation_batch, saving_epoch):
    # Currently only a training batch size of 1 is supported.
    # Later the code may be changed to try to support batch sizes > 1.
    #
    # Per epoch: run optimization steps over the train iterator, evaluate on
    # the validation set, plot predictions/inputs, log both losses to
    # TensorBoard, and optionally checkpoint the model every `saving_epoch`
    # epochs to ./output/Model.
    writer = FileWriter('./output/Log/fcn')
    start = time.time()
    segmentationNet.train()
    running_loss = 0.0

    for epoch in range(0, n_epochs):
        # `step` counts batches globally across epochs; note it deliberately
        # leaks out of this loop and is reused below for logging.
        for step, (x_batch, y_batch) in enumerate(train_iterator, 1 + epoch*n_batches):

            batch_loss = optimization_step(
                segmentationNet, optimizer, x_batch, y_batch
            )
            running_loss += batch_loss
            

        # Switch to eval mode for validation (restored to train mode below).
        segmentationNet.eval()
        test_loss, outputs, data_y= eval(
            segmentationNet, val_iterator, n_validation_batch
        )
        end = time.time()
        
        print('validation_step: {0:.2f}  avg_train_loss: {1:.3f}  avg_val_loss: {2:.3f}  time_per_val_step: {3:.3f}'.format(
            step/n_batches, running_loss/n_batches, test_loss,
            (end - start)
        ))
        # Visualize predicted label maps and the corresponding inputs.
        plot_reconstruction(outputs)
        plot_reconstruction2(data_y)
        # all_losses += [(
        #     step/n_batches,
        #     running_loss/validation_step, 
        #     test_loss,
        #     running_accuracy/validation_step, 
        #     test_accuracy
        # )]
        

        # Log per-epoch losses; step/n_batches is used as the global step.
        summary_train_loss = summary.scalar("train_loss", running_loss/n_batches)
        summary_val_loss = summary.scalar("val_loss", test_loss)
        writer.add_summary(summary_train_loss, step/n_batches)
        writer.add_summary(summary_val_loss, step/n_batches)   

        # Reset accumulators and return to training mode for the next epoch.
        running_loss = 0.0
        start = time.time()
        segmentationNet.train()

        # Periodic checkpointing (epoch 0 included since 0 % saving_epoch == 0).
        if saving_epoch is not None and epoch % saving_epoch == 0:
            print('saving model for epoch {}.'.format(epoch))
            torch.save(
            segmentationNet.state_dict(),
            '%s/seg_epoch_%d.pth' % ("./output/Model", epoch))
    writer.close()
    