import torch
import torch.nn as nn
import yaml
from torch import optim
import time

from torch.utils import data

from comp.GeneratorLSTM import GeneratorLSTM
from comp.DiscriminatorLSTM import DiscriminatorLSTM
from comp.Dataloader import Dataloader
from comp.LossCompute import LossCompute

from comp.utils import ones_target, zeros_target

# GAN variant 1: LSTM generator + LSTM discriminator, trained with LossCompute.
class Gan1(nn.Module):
    """GAN pairing an LSTM generator with an LSTM discriminator.

    Default hyper-parameters are set in ``__init__`` and then overridden
    from ``./config.yaml`` (see :meth:`read_yaml`); the models, optimizers
    and training-data iterator are all constructed in ``__init__``.
    """

    def __init__(self):
        super(Gan1, self).__init__()
        # Populated by load_data() below.
        self.train_data_iterator = None
        self.sequence_len: int = 10
        self.train_D_steps: int = 1
        self.train_G_steps: int = 1
        self.total_epoch: int = 30
        # Timestamp tag for artefacts produced by this run.
        self.this_time = time.strftime("%y_%m_%d_%H_%M_%S", time.localtime())
        # Default DataLoader config; overridden via the yaml file.
        self.data_params: dict = {'batch_size': 100,
                                  'shuffle': True,
                                  'num_workers': 6}
        # When True, resume from a saved checkpoint after construction.
        self.load_flag: bool = False
        self.learning_rate_D: float = 0.1
        self.learning_rate_G: float = 0.0001
        self.discriminator_out_dim: int = 1
        self.discriminator_input_dim: int = 1
        self.generator_out_dim: int = 3
        self.hidden_dim: int = 400
        self.ff1_out: int = 400
        self.lyrics_dim: int = 64
        self.embed_dim: int = 32
        self.cuda: bool = False
        self.read_yaml('./config.yaml')

        self.device = torch.device('cuda:0' if self.cuda else 'cpu')

        self.gen = GeneratorLSTM(self.lyrics_dim, self.ff1_out, self.hidden_dim, self.generator_out_dim)
        self.dis = DiscriminatorLSTM(self.discriminator_input_dim, self.hidden_dim, self.discriminator_out_dim)

        # NOTE(review): the optimizers are built before the .to(device) move
        # below; Adam tolerates this, but moving the models first would be
        # the conventional order — confirm before changing.
        self.optimizer_G = optim.Adam(self.gen.parameters(), lr=self.learning_rate_G)
        self.optimizer_D = optim.Adam(self.dis.parameters(), lr=self.learning_rate_D)

        self.criterion = LossCompute()

        self.load_data()

        if self.load_flag:
            self.load_model('.')

        if self.cuda:
            self.gen.to(self.device)
            self.dis.to(self.device)

    def read_yaml(self, path):
        """Override the default hyper-parameters from the yaml file at *path*.

        Derived dimensions: ``lyrics_dim`` is twice the embedding size, and
        the discriminator consumes the embedding concatenated with the
        generator output.
        """
        # NOTE(review): FullLoader can construct arbitrary Python objects;
        # if config.yaml is ever user-supplied, switch to yaml.safe_load.
        with open(path, 'r') as config_yaml:
            cfg = yaml.load(config_yaml, yaml.FullLoader)
        self.cuda = cfg['cuda']
        self.embed_dim = cfg['embed_dim']
        self.lyrics_dim = 2 * self.embed_dim
        self.ff1_out = cfg['ff1_out']
        self.hidden_dim = cfg['hidden_dim']
        self.generator_out_dim = cfg['generator_out_dim']
        self.discriminator_input_dim = self.embed_dim + self.generator_out_dim
        self.discriminator_out_dim = cfg['discriminator_out_dim']
        self.learning_rate_G = cfg['learning_rate_G']
        self.learning_rate_D = cfg['learning_rate_D']
        self.load_flag = cfg['load_model']
        self.data_params = cfg['data_params']
        self.total_epoch = cfg['epochs']
        self.train_G_steps = cfg['train_G_steps']
        self.train_D_steps = cfg['train_D_steps']
        self.sequence_len = cfg['sequence_len']

    def save_model(self, epoch):
        """Persist a checkpoint for *epoch*. Not implemented yet."""
        pass

    def load_model(self, pth):
        """Restore a checkpoint from directory *pth*. Not implemented yet."""
        pass

    def load_data(self):
        """Build the training DataLoader from the embedded-lyrics dataset."""
        training_set = Dataloader('2022-01-05_23_08_37_embeddings_vector.pt',
                                  '2022-01-05_23_08_37_vocabulary_lookup.json',
                                  self.sequence_len)
        self.train_data_iterator = data.DataLoader(training_set, **self.data_params)
        print('loaded data')

    def train_all_epochs(self):
        """Run the full adversarial training loop.

        Each epoch first trains the discriminator for ``train_D_steps``
        batches (real samples labelled 1, generated samples labelled 0),
        then trains the generator for ``train_G_steps`` batches against
        an all-ones target.
        """
        # range(1, N + 1): the original range(1, N) silently dropped the
        # final epoch (only N - 1 epochs ran).
        for epoch in range(1, self.total_epoch + 1):
            print('Training epoch {} ...'.format(epoch))

            # Per-epoch loss logs (one aggregate entry per phase).
            losses_G = []
            losses_D = []

            self.dis.train()
            self.gen.train()

            # ---- Discriminator phase: train_D_steps batches ----
            total_D_loss = 0
            # `batch` (not `data`) so the torch.utils `data` module used by
            # load_data() is not shadowed. enumerate(..., start=1) plus the
            # `>=` break runs exactly train_D_steps batches; the original
            # 0-based `==` test ran one extra batch.
            for num_steps_D, batch in enumerate(self.train_data_iterator, start=1):
                lyrics_seq = batch[0].to(self.device)
                cont_val_seq = batch[1].to(self.device)  # unused in this phase
                discrete_val_seq = batch[2].to(self.device)
                noise_seq = batch[3].to(self.device)

                self.optimizer_D.zero_grad()

                # Fake half: detach so these gradients never reach the generator.
                fake_G_out = self.gen(lyrics_seq, noise_seq).detach()
                fake_D_out = self.dis(fake_G_out, lyrics_seq)
                fake_val = zeros_target(fake_D_out.shape).to(self.device)
                fake_D_loss = self.criterion(fake_D_out, fake_val)
                fake_D_loss.backward()

                # Real half: labelled 1. The original used zeros_target here,
                # which trained the discriminator to call real data fake and
                # destroyed the adversarial signal.
                true_D_out = self.dis(discrete_val_seq, lyrics_seq)
                true_val = ones_target(true_D_out.shape).to(self.device)
                true_D_loss = self.criterion(true_D_out, true_val)
                true_D_loss.backward()

                self.optimizer_D.step()

                # Average the two halves so D and G losses stay comparable.
                total_D_loss += ((fake_D_loss.item() + true_D_loss.item()) / 2)

                if num_steps_D >= self.train_D_steps:
                    break

            losses_D.append(total_D_loss)

            print("Loss while training discriminator is: {}".format(total_D_loss))

            # ---- Generator phase: train_G_steps batches ----
            total_G_loss = 0
            for num_steps_G, batch in enumerate(self.train_data_iterator, start=1):
                lyrics_seq = batch[0].to(self.device)
                noise_seq = batch[3].to(self.device)

                self.optimizer_G.zero_grad()

                # No detach here: gradients must flow through D into G.
                fake_G_out = self.gen(lyrics_seq, noise_seq)
                fake_D_out = self.dis(fake_G_out, lyrics_seq)
                # G is rewarded when D labels its output as real (1).
                true_val = ones_target(fake_D_out.shape).to(self.device)
                fake_G_loss = self.criterion(fake_D_out, true_val)

                fake_G_loss.backward()
                self.optimizer_G.step()

                total_G_loss += fake_G_loss.item()

                if num_steps_G >= self.train_G_steps:
                    break

            losses_G.append(total_G_loss)

            print("Loss while training generator is: {}".format(total_G_loss))

    def train_gen(self, epoch: int):
        """Train the generator for one *epoch*. Not implemented yet."""
        pass

    def train_dis(self, epoch: int):
        """Train the discriminator for one *epoch*. Not implemented yet."""
        pass

    def get_gen_data(self):
        """Iterate the training data in eval mode, printing batch shapes.

        Observed shapes (batch, sequence_len, feature):
        lyrics (100, 10, 32), continuous values (100, 10, 4),
        discrete values (100, 10, 3).
        """
        self.gen.eval()
        self.dis.eval()

        with torch.no_grad():
            for i, test_data in enumerate(self.train_data_iterator):
                lyrics_seq = test_data[0].to(self.device)
                cont_val_seq = test_data[1].to(self.device)
                discrete_val_seq = test_data[2].to(self.device)
                noise_seq = test_data[3].to(self.device)
                print("Lyrics sequence is: {}".format(lyrics_seq.shape))
                print("Content Value Sequence is: {}".format(cont_val_seq.shape))
                print("Discrete value sequence is: {}".format(discrete_val_seq.shape))
                print("This turn {}".format(i))
