import numpy as np
from pathlib import Path
from datetime import date
import argparse
import shutil

import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
import torch.optim as optim
from torch.utils import data
import json

from comp.DataloaderJP import DataloaderJP as Dataloader

# Make float64 the default dtype for every tensor/module created below
# (applies process-wide, so the model and data must agree on double precision).
torch.set_default_tensor_type(torch.DoubleTensor)

class LossCompute(object):
    """Callable wrapper around an L1 (mean absolute error) criterion."""

    def __init__(self):
        # Mean absolute error between prediction and target.
        self.criterion = nn.L1Loss()

    def __call__(self, x, y):
        """
        Compute the L1 loss between a prediction and a target.

        :param x: predicted value
        :param y: actual value
        :return: scalar loss tensor
        """
        return self.criterion(x, y)


class Gener(nn.Module):
    """Sequence generator: linear projection -> GRU -> linear head.

    The concatenated (lyrics, noise) features are projected to ``ff1_out``,
    run through a single-layer unidirectional batch-first GRU, and mapped
    to ``out_dim`` features per time step.
    """

    def __init__(self, embed_dim, ff1_out, hidden_dim, out_dim):
        """
        :param embed_dim: feature size of the concatenated (lyrics, noise) input
        :param ff1_out: width of the input projection (the GRU's input size)
        :param hidden_dim: GRU hidden size
        :param out_dim: feature size of the generated output per time step
        """
        super(Gener, self).__init__()

        self.input_ff = nn.Linear(embed_dim, ff1_out)

        # Single-layer unidirectional GRU over the projected sequence.
        # (Earlier experiments used nn.LSTM / nn.RNN here.)
        self.lstm = nn.GRU(ff1_out, hidden_dim, num_layers=1,
                           batch_first=True, bidirectional=False)

        # BUG FIX: the GRU emits `hidden_dim` features per step, so the output
        # head must consume `hidden_dim`. The original used `ff1_out`, which
        # only worked because the caller happened to pass ff1_out == hidden_dim.
        self.output_ff = nn.Linear(hidden_dim, out_dim)

    def forward(self, lyrics, noise):
        """
        Generate a value sequence conditioned on lyrics embeddings and noise.

        :param lyrics: lyrics embedding tensor, concatenated with ``noise``
            along dim 2 — assumed (batch, seq, feat), TODO confirm against
            the dataloader.
        :param noise: noise tensor with the same batch/seq dims as ``lyrics``.
        :return: (batch, seq, out_dim) tensor of generated values.
        """
        concat_lyrics = torch.cat((lyrics, noise), 2)
        out1 = F.relu(self.input_ff(concat_lyrics))
        lstm_out, _ = self.lstm(out1)
        tag = self.output_ff(lstm_out)
        return tag



if __name__ == '__main__':
    use_cuda = torch.cuda.is_available()
    # Training is deliberately run on CPU regardless of CUDA availability.
    device = 'cpu'

    data_params = {'batch_size': 500,
                   'shuffle': True,
                   'num_workers': 8}

    # TODO: tune these learning rates.
    learning_rate_G = 0.1
    learning_rate_D = 0.0001

    sequence_len = 20
    training_set = Dataloader('2022-01-15_18_30_13_embeddings10_vector.pt',
                              '2022-01-15_18_30_13_vocabulary10_lookup.json',
                              sequence_len)
    train_data_iterator = data.DataLoader(training_set, **data_params)

    embed_dim = 20
    # Lyrics embedding and noise are concatenated, so the generator input
    # width is twice the embedding size.
    lyrics_dim = 2 * embed_dim
    ff1_out = 400
    hidden_dim = 400
    generator_out_dim = 3

    discriminator_input_dim = embed_dim + generator_out_dim
    discriminator_out_dim = 1

    generator = Gener(lyrics_dim, ff1_out, hidden_dim, generator_out_dim)
    optimizer_G = optim.Adam(generator.parameters(), lr=learning_rate_G)
    criterion = LossCompute()

    start_epoch = 0
    epochs = 200
    train_D_steps = 1
    train_G_steps = 1

    for epoch in range(epochs):
        generator.train()

        total_loss = 0
        # Loop variable is named `batch` (not `data`) so it does not shadow
        # the `torch.utils.data` module imported at the top of the file.
        for step, batch in enumerate(train_data_iterator):
            lyrics_seq = batch[0]
            discrete_val_seq = batch[2]
            noise_seq = batch[3]

            fakeout = generator(lyrics_seq, noise_seq)
            print('fake_out shape : {}'.format(fakeout.shape))

            # LossCompute's contract is (predicted, actual); the original
            # call had the arguments swapped — numerically identical for L1
            # loss, but misleading to read.
            fake_loss = criterion(fakeout, discrete_val_seq)
            optimizer_G.zero_grad()
            fake_loss.backward()
            optimizer_G.step()

            total_loss += fake_loss.item()
            print('epoch {}'.format(epoch))

            # Only the first batch of each epoch is trained on
            # (original: `if step+1 == 1: break`).
            break
        print('gene loss : {}'.format(total_loss))

    generator.eval()

    # Ensure the dump directory exists before np.save writes into it.
    Path('sangle_save').mkdir(parents=True, exist_ok=True)

    # Dump generator outputs for the first few batches for offline inspection.
    with torch.no_grad():
        for i, batch in enumerate(train_data_iterator):
            lyrics_seq = batch[0]
            cont_val_seq = batch[1]
            discrete_val_seq = batch[2]
            noise_seq = batch[3]
            print("Lyrics sequence is: {}".format(lyrics_seq.shape))
            print("Content Value Sequence is: {}".format(cont_val_seq.shape))
            print("Discrete value sequence is: {}".format(discrete_val_seq.shape))

            gen_out = generator(lyrics_seq, noise_seq)
            np.save('sangle_save/genera_gru_{}'.format(i),
                    torch.cat((discrete_val_seq, gen_out), dim=2).cpu().numpy())

            if i == 5:
                break