import numpy as np
from pathlib import Path
from datetime import date
import argparse
import shutil

import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
import torch.optim as optim
from torch.utils import data
import json

from comp.GeneratorRMC import GeneratorRMC


# from src.conditioned_gan import Dataloader
from comp.DataloaderJP import DataloaderJP as Dataloader
from comp.RelationalMemory import RelationalMemory

# Make float64 (DoubleTensor) the default type for every newly created tensor.
# NOTE(review): this is a deprecated global switch — torch.set_default_dtype
# is the modern equivalent; it also interacts with add_gumbel's noise dtype below.
torch.set_default_tensor_type(torch.DoubleTensor)

class LossCompute(object):
    """Thin callable wrapper around ``nn.L1Loss`` used as the generator criterion."""

    def __init__(self):
        # Mean absolute error between prediction and target.
        self.criterion = nn.L1Loss()

    def __call__(self, x, y):
        """Return the L1 loss between predicted value *x* and actual value *y*."""
        return self.criterion(x, y)

def add_gumbel(o_t, eps=1e-10, gpu=False):
    """Perturb logits ``o_t`` with noise sampled from Gumbel(0, 1).

    Uses inverse-CDF sampling: g = -log(-log(U)) for U ~ Uniform(0, 1).

    :param o_t: tensor of logits to perturb
    :param eps: small constant guarding both log() calls against log(0)
    :param gpu: retained for backward compatibility; the noise tensor is now
        created on o_t's own device/dtype, so the flag is a no-op
    :return: o_t + g, same shape, dtype and device as o_t
    """
    # zeros_like keeps dtype/device consistent with o_t. The previous
    # torch.zeros(o_t.size()) used the process-wide *default* tensor type,
    # which silently promoted the result (e.g. float32 logits + float64
    # noise -> float64 output) and broke on non-default devices.
    u = torch.zeros_like(o_t)
    u.uniform_(0, 1)

    # Both eps terms prevent -inf from log(0) at the distribution's edges.
    g_t = -torch.log(-torch.log(u + eps) + eps)
    return o_t + g_t


from torch import nn


# class GeneratorRMC(nn.Module):
#     def __init__(self, embed_dim, ff1_out, hidden_dim_lstm_as_input_size, out_dim, head_size=256, num_heads=2,
#                  mem_slots=1, drop=0.25, cuda=False,init_batch_size=100):
#         super(GeneratorRMC, self).__init__()
#
#         self.input_ff = nn.Linear(embed_dim, hidden_dim_lstm_as_input_size)
#         self.hidden_dim_rmc = mem_slots * num_heads * head_size
#         # self.rmc = RelationalMemory(head_size=head_size, input_size=hidden_dim_lstm_as_input_size, mem_slots=mem_slots)
#
#         # self.lstm = nn.LSTM(ff1_out, hidden_dim, num_layers=2)
#         self.drop = nn.Dropout(drop)
#         self.output_ff = nn.Linear(head_size*mem_slots, out_dim)
#         self.gpu = cuda
#         self.temperature = nn.Parameter(torch.Tensor([1.0]), requires_grad=False)
#         # self.vocab_size = 10
#         self.init_batch_size=init_batch_size
#
#
#         self.line1=nn.Linear(800,400)
#         self.rmc=RelationalMemory(head_size=5120,input_size=400,mem_slots=1)
#
#
#     def forward(self, lyrics):
#
#         concat_lyrics = torch.cat((lyrics,torch.FloatTensor(lyrics.size()).uniform_()), 2)
#         # concat_lyrics = torch.cat((lyrics, torch.zeros(lyrics.size())), 2)
#         # print('concat_lyrics size:{}'.format(concat_lyrics.shape))
#         hidden = self.init_hidden(self.init_batch_size)
#
#
#
#
#         # all_single_out_rmc = []
#         # for i in range(concat_lyrics.shape[1]):
#         #     single_concat_lyrics = torch.unsqueeze(concat_lyrics[0:, i, 0:], 1)
#         #
#         #     out1 = F.relu(self.input_ff(single_concat_lyrics))
#         #     # print('out1 size:{}'.format(out1.shape))
#         #     # emb = self.embeddings(inp).unsqueeze(1)
#         #
#         #     out, hidden = self.rmc(out1, hidden)
#         #     # print('out size:{}'.format(out.shape))
#         #     all_single_out_rmc.append(out)
#         # out = torch.cat(tuple(all_single_out_rmc), dim=1)
#
#
#
#         concat_lyrics=concat_lyrics.view(100,1,-1)
#         out_line=F.relu(self.line1(concat_lyrics))
#         out,hidden=self.rmc(out_line,hidden)
#         out=out.view(100,20,256)
#
#
#
#         tag = self.output_ff(out)
#         # print('tag size:{}'.format(tag.shape))
#         return tag
#
#
#
#     def init_hidden(self, batch_size):
#         memory = self.rmc.initial_state(batch_size)
#         memory = self.rmc.repackage_hidden(memory)
#         return memory.cuda() if self.gpu else memory





if __name__ == '__main__':
    # Training runs on CPU regardless of CUDA availability; flip `device`
    # (and move the model/batches) to re-enable GPU training.
    use_cuda = torch.cuda.is_available()
    device = 'cpu'

    data_params = {'batch_size': 500,
                   'shuffle': True,
                   'num_workers': 8}

    # TODO: tune these learning rates (discriminator is not trained here yet).
    learning_rate_G = 0.1
    learning_rate_D = 0.0001

    sequence_len = 20
    training_set = Dataloader('2022-01-08_19_12_36_embeddings_vector.pt',
                              '2022-01-08_19_12_36_vocabulary_lookup.json',
                              sequence_len)
    train_data_iterator = data.DataLoader(training_set, **data_params)

    embed_dim = 20
    lyrics_dim = 2 * embed_dim  # generator input: embedding concatenated with same-size noise
    ff1_out = 400
    hidden_dim = 400
    generator_out_dim = 3

    # Discriminator dimensions are defined for a future adversarial stage;
    # only the generator is trained below.
    discriminator_input_dim = embed_dim + generator_out_dim
    discriminator_out_dim = 1

    generator = GeneratorRMC(lyrics_dim, ff1_out, embed_dim, generator_out_dim,
                             init_batch_size=data_params['batch_size'])
    optimizer_G = optim.Adam(generator.parameters(), lr=learning_rate_G)
    criterion = LossCompute()

    start_epoch = 0
    epochs = 50
    train_D_steps = 1
    train_G_steps = 1

    for epoch in range(epochs):
        generator.train()

        total_loss = 0
        # Loop variable renamed from `data` to `batch`: the old name shadowed
        # the imported torch.utils.data module. Batch layout is assumed to be
        # (lyrics, cont_vals, discrete_vals, noise) — TODO confirm with DataloaderJP.
        for step, batch in enumerate(train_data_iterator):
            lyrics_seq = batch[0]
            discrete_val_seq = batch[2]

            fakeout = generator(lyrics_seq)
            print('fake_out shape : {}'.format(fakeout.shape))

            # L1 distance between the real discrete values and the generated ones.
            fake_loss = criterion(discrete_val_seq, fakeout)
            optimizer_G.zero_grad()
            fake_loss.backward()
            optimizer_G.step()

            total_loss += fake_loss.item()
            print('epoch {}'.format(epoch))

            # Train on a single batch per epoch (the original exited via
            # the always-true `if step+1 == 1: break`).
            break
        print('gene loss : {}'.format(total_loss))

    generator.eval()

    # Dump a few (ground truth, generated) pairs for offline inspection.
    with torch.no_grad():
        for i, batch in enumerate(train_data_iterator):
            lyrics_seq = batch[0]
            cont_val_seq = batch[1]
            discrete_val_seq = batch[2]
            print("Lyrics sequence is: {}".format(lyrics_seq.shape))
            print("Content Value Sequence is: {}".format(cont_val_seq.shape))
            print("Discrete value sequence is: {}".format(discrete_val_seq.shape))

            gen_out = generator(lyrics_seq)
            # NOTE(review): assumes the 'sangle_save' directory already exists.
            np.save('sangle_save/rmcgenera_{}'.format(i),
                    torch.cat((discrete_val_seq, gen_out), dim=2).cpu().numpy())

            if i == 5:
                break