import torch
from torch.autograd import Variable


def ones_target(size):
    """
    Return a tensor of ones with the given shape.

    :param size: int, sequence of ints, or torch.Size giving the shape.
    :return: torch.Tensor filled with 1.0.
    """
    # torch.autograd.Variable is deprecated since PyTorch 0.4: plain
    # tensors carry autograd state themselves, so no wrapping is needed.
    return torch.ones(size)


def zeros_target(size):
    """
    Return a tensor of zeros with the given shape.

    :param size: int, sequence of ints, or torch.Size giving the shape.
    :return: torch.Tensor filled with 0.0.
    """
    # torch.autograd.Variable is deprecated since PyTorch 0.4: plain
    # tensors carry autograd state themselves, so no wrapping is needed.
    return torch.zeros(size)


def train_conditional_gan(train_data_iterator, generator, discriminator, optimizer_G, optimizer_D, criterion,
                          start_epoch, epochs, loss_threshold, device, checkpoint_dir, model_dir, save_every,
                          print_every, train_D_steps, train_G_steps):
    """
    Run training loop in epochs.
    In one epoch, have certain number of steps for which you optimize for Discriminator
    Then have one step for
    :return:
    """
    for epoch in range(start_epoch, epochs):
        print('Training epoch {} ...'.format(epoch))

        losses_G = []
        losses_D = []

        discriminator.train()
        generator.train()

        # if (epoch + 1) % print_every == 0:
        #     print("Running epoch {} / {}".format(epoch + 1, epochs))
        #
        # logger.info("Running epoch {} / {}".format(epoch + 1, epochs))

        # Train discriminator for train_D_steps
        total_D_loss = 0
        for num_steps_D, data in enumerate(train_data_iterator):
            # Segregating data
            lyrics_seq = data[0].to(device)
            cont_val_seq = data[1].to(device)
            discrete_val_seq = data[2].to(device)
            noise_seq = data[3].to(device)

            optimizer_D.zero_grad()

            # Train on fake data
            fake_G_out = generator(lyrics_seq, noise_seq).detach()  # detach to avoid training G on these labels
            #             print("Generated MIDI sequence is")
            #             print(fake_G_out)
            fake_D_out = discriminator(fake_G_out, lyrics_seq)
            #             print(fake_D_out)
            fake_val = zeros_target(fake_D_out.shape)
            fake_val = fake_val.to(device)
            #             print(fake_val)
            fake_D_loss = criterion(fake_D_out, fake_val)
            #             print(fake_D_loss)
            fake_D_loss.backward()

            # Train on real data
            #             print("True MIDI sequence is")
            #             print(discrete_val_seq)
            true_D_out = discriminator(discrete_val_seq, lyrics_seq)
            true_val = zeros_target(true_D_out.shape)
            true_val = true_val.to(device)
            true_D_loss = criterion(true_D_out, true_val)
            true_D_loss.backward()

            optimizer_D.step()

            total_D_loss += ((true_D_loss.item() + true_D_loss.item()) / 2)
            # print(loss)
            # print(type(loss))

            if num_steps_D == train_D_steps:
                break

        losses_D.append((total_D_loss))

        print("Loss while training discriminator is: {}".format(total_D_loss))

        # Train Generator for train_G_steps
        total_G_loss = 0
        for num_steps_G, data in enumerate(train_data_iterator):
            lyrics_seq = data[0].to(device)
            cont_val_seq = data[1].to(device)
            discrete_val_seq = data[2].to(device)
            noise_seq = data[3].to(device)

            optimizer_G.zero_grad()

            fake_G_out = generator(lyrics_seq, noise_seq)
            # print("Printing Generator output")
            # print("Shape is: {}".format(fake_G_out.shape))
            # print(fake_G_out)
            fake_D_out = discriminator(fake_G_out, lyrics_seq)
            true_val = ones_target(fake_D_out.shape)
            true_val = true_val.to(device)
            fake_G_loss = criterion(fake_D_out, true_val)

            fake_G_loss.backward()
            optimizer_G.step()

            total_G_loss += fake_G_loss.item()

            if num_steps_G == train_G_steps:
                break

        losses_G.append(total_G_loss)

        print("Loss while training generator is: {}".format(total_G_loss))

        # logger.info("Loss is : {}".format(total_loss))
        #
        # if (epoch + 1) % print_every == 0:
        #     print("Loss is : {}".format(total_loss))
        #
        # if (epoch + 1) % save_every == 0:
        #     loss_change = prev_loss - total_loss
        #     logger.info(
        #         "Change in loss after {} epochs is: {}".format(save_every,
        #                                                        loss_change))
        #     if loss_change > 0:
        #         is_best = True
        #     if loss_change < loss_threshold:
        #         to_break = True
        #
        #     prev_loss = total_loss
        #
        #     logger.info("Creating checkpoint at epoch {}".format(epoch + 1))
        #     checkpoint = {
        #         'epoch': epoch + 1,
        #         'state_dict': model.state_dict(),
        #         'optimizer': optimizer.state_dict()
        #     }
        #     save_checkpoint(checkpoint, is_best, checkpoint_dir, model_dir)
        #     logger.info("Checkpoint created")
        #
        # if to_break:
        #     logger.info(
        #         "Change in loss is less than the threshold. Stopping training")
        #     break

#     logger.info("Completed Training")
