import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
import time
import copy
from tqdm import tqdm
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch
from tensorboardX import FileWriter, summary

def evaluate(model, criterion, val_iterator, n_batches):
    """Return the per-sample average loss and accuracy over the validation set
    (not the total loss / total accuracy).

    Averages are weighted by batch size, so a smaller final batch is
    handled correctly.

    Args:
        model: network to evaluate; caller is responsible for model.eval().
        criterion: loss function; its (summed) per-batch value is re-weighted
            by batch size below.
        val_iterator: iterable yielding (x_batch, y_batch) pairs.
        n_batches: maximum number of batches to evaluate.

    Returns:
        (avg_loss, avg_accuracy) as Python floats.
    """
    total_loss = 0.0
    total_acc = 0.0
    total_samples = 0

    # no_grad: don't build the autograd graph during evaluation
    # (saves memory and time; the original version tracked gradients).
    with torch.no_grad():
        for j, (x_batch, y_batch) in enumerate(val_iterator):
            x_batch = x_batch.cuda()
            y_batch = y_batch.cuda(non_blocking=True)
            n_batch_samples = y_batch.size(0)

            logits = model(x_batch)

            # .sum() keeps the original semantics for any reduction mode;
            # for a scalar-reducing criterion it is a no-op.
            batch_loss = criterion(logits, y_batch).sum().item()

            # fraction of argmax predictions matching the targets
            prediction = torch.max(logits, 1)[1]
            batch_accuracy = (prediction == y_batch).float().mean().item()

            total_loss += batch_loss * n_batch_samples
            total_acc += batch_accuracy * n_batch_samples
            total_samples += n_batch_samples

            # Off-by-one fix: the original `if j >= n_batches: break`
            # evaluated n_batches + 1 batches.
            if j + 1 >= n_batches:
                break

    return total_loss / total_samples, total_acc / total_samples

def optimization_step(model, criterion, optimizer, x_batch, y_batch):
    """Run one training step: forward pass, loss, backward pass, optimizer update.

    Computes the batch-averaged loss and accuracy for this mini-batch and
    backpropagates the loss.

    Args:
        model: network in train mode.
        criterion: loss function; must reduce to a scalar (it is backpropagated).
        optimizer: optimizer updating the model's parameters.
        x_batch: input mini-batch (moved to CUDA here).
        y_batch: integer class targets for the mini-batch (moved to CUDA here).

    Returns:
        (batch_loss, batch_accuracy): scalar loss value and the fraction of
        correct predictions in this batch, as Python floats.
    """
    x_batch = x_batch.cuda()
    y_batch = y_batch.cuda(non_blocking=True)

    logits = model(x_batch)
    loss = criterion(logits, y_batch)

    # Accuracy: fraction of argmax predictions equal to the targets.
    # (Replaces the original cpu->numpy->torch->numpy round-trip.)
    prediction = torch.max(logits, 1)[1]
    batch_accuracy = (prediction == y_batch).float().mean().item()

    # loss is a scalar (it is backpropagated below), so .item() is
    # equivalent to the original .cpu().data.numpy().sum().
    batch_loss = loss.item()

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    return batch_loss, batch_accuracy


def train(model, criterion, optimizer,
          train_iterator, n_epochs, n_batches,
          val_iterator, validation_step, n_validation_batches,
          saving_epoch, lr_scheduler=None):
    """Run the training loop with periodic validation, TensorBoard logging,
    and per-epoch checkpointing.

    Args:
        model: network being trained. Batches are moved to CUDA inside
            optimization_step/evaluate, so the model is presumably already
            on the GPU — confirm against the caller.
        criterion: loss function forwarded to optimization_step/evaluate.
        optimizer: optimizer stepped on every batch; may be replaced by
            the return value of `lr_scheduler(optimizer, step)` when a
            callable scheduler is given.
        train_iterator: iterable of (x_batch, y_batch) training batches.
        n_epochs: number of passes over the training data.
        n_batches: batches per epoch; used only to offset the global step
            counter and to express progress as a fractional epoch in logs.
        val_iterator: iterable of validation batches, consumed by evaluate().
        validation_step: validate/log every this many global steps.
        n_validation_batches: batch budget passed to evaluate().
        saving_epoch: checkpoint every `saving_epoch` epochs (starting at
            epoch 0); None disables saving.
        lr_scheduler: either a ReduceLROnPlateau instance (stepped on the
            validation accuracy) or a callable `(optimizer, step) -> optimizer`
            invoked before every batch; None disables scheduling.

    Returns:
        list of tuples
        (epoch_fraction, avg_train_loss, avg_val_loss, avg_train_acc, avg_val_acc),
        one per validation step.
    """

    all_losses = []
    # all_models = []

    # ReduceLROnPlateau is stepped on a metric after validation; any other
    # (callable) scheduler is applied per batch below.
    is_reduce_on_plateau = isinstance(lr_scheduler, ReduceLROnPlateau)

    # Accumulators over the last `validation_step` batches; reset after
    # each validation/log round.
    running_loss = 0.0
    running_accuracy = 0.0
    # NOTE(review): log directory is hardcoded to resnet50 output path.
    writer = FileWriter('./output/Log/resnet50')
    start = time.time()
    model.train()

    for epoch in range(0, n_epochs):
        # Global step continues across epochs: step = 1 + epoch*n_batches + i.
        # Assumes train_iterator yields exactly n_batches batches per epoch —
        # TODO confirm; otherwise the step counter drifts.
        for step, (x_batch, y_batch) in enumerate(train_iterator, 1 + epoch*n_batches):
            # x_batch.requires_grad_(False)
            # x_batch.cuda()
            # y_batch.requires_grad_(False)
            # y_batch.cuda(non_blocking=True)

            # Callable scheduler convention: returns the (possibly new) optimizer.
            if lr_scheduler is not None and not is_reduce_on_plateau:
                optimizer = lr_scheduler(optimizer, step)

            batch_loss, batch_accuracy = optimization_step(
                model, criterion, optimizer, x_batch, y_batch
            )
            running_loss += batch_loss
            running_accuracy += batch_accuracy


            if step % validation_step == 0:
                # Switch to eval mode for validation, back to train after.
                model.eval()
                test_loss, test_accuracy = evaluate(
                    model, criterion, val_iterator, n_validation_batches
                )
                end = time.time()

                print('validation_step: {0:.2f}  avg_train_loss: {1:.3f}  avg_val_loss: {2:.3f}  avg_train_acc: {3:.3f}  avg_val_acc: {4:.3f}  time_per_val_step: {5:.3f}'.format(
                    step/n_batches, running_loss/validation_step, test_loss,
                    running_accuracy/validation_step, test_accuracy,
                    (end - start)
                ))
                all_losses += [(
                    step/n_batches,
                    running_loss/validation_step, 
                    test_loss,
                    running_accuracy/validation_step, 
                    test_accuracy
                )]

                # ReduceLROnPlateau monitors validation accuracy.
                if is_reduce_on_plateau:
                    lr_scheduler.step(test_accuracy)

                # TensorBoard scalars; the global_step argument is the
                # fractional epoch (step/n_batches).
                summary_train_loss = summary.scalar("train_loss", running_loss/validation_step)
                summary_val_loss = summary.scalar("val_loss", test_loss)
                summary_train_acc = summary.scalar("train_acc", running_accuracy/validation_step)
                summary_val_acc = summary.scalar("val_acc", test_accuracy)
                writer.add_summary(summary_train_loss, step/n_batches)
                writer.add_summary(summary_val_loss, step/n_batches)
                writer.add_summary(summary_train_acc, step/n_batches)
                writer.add_summary(summary_val_acc, step/n_batches)    

                # Reset running stats and timer for the next window.
                running_loss = 0.0
                running_accuracy = 0.0
                start = time.time()
                model.train()

        # Checkpoint at epoch 0, saving_epoch, 2*saving_epoch, ...
        # NOTE(review): output path is hardcoded.
        if saving_epoch is not None and epoch % saving_epoch == 0:
            print('saving model for epoch {}.'.format(epoch))
            torch.save(
            model.state_dict(),
            '%s/resnet50_epoch_%d.pth' % ("./output/Model", epoch))
    writer.close()
    return all_losses