import os

import numpy as np

from comp.DiscriminatorLSTMFrans import DiscriminatorLSTMFrans as DiscriminatorLSTM
from comp.DiscriminatorCandidate1_no import DiscriminatorCandidate1_no
from comp.GeneratorRMC import GeneratorRMC
from comp.GeneratorLSTM import GeneratorLSTM
import torch
import torch.nn as nn
import yaml
from torch import optim
import time

from torch.utils import data

from comp.DataloadernoConJP import DataloadernoConJP as Dataloader
from comp.LossComputeWithModule import LossComputeWithModule
from comp.exp_lr_scheduler import exp_lr_scheduler

from comp.utils import ones_target, zeros_target

# EPS = 1e-4
# Make float64 the process-wide default dtype for all newly created tensors.
# NOTE(review): torch.set_default_tensor_type is deprecated in recent PyTorch;
# torch.set_default_dtype(torch.float64) is the modern near-equivalent — confirm
# before migrating, since this call also pins the default device type to CPU.
torch.set_default_tensor_type(torch.DoubleTensor)

class LSTMGanForShell(nn.Module):
    """Adversarial trainer pairing an LSTM generator with an LSTM discriminator.

    The generator maps a lyrics-embedding sequence to a melody-feature
    sequence; the discriminator scores (melody, lyrics) sequence pairs.
    Every hyper-parameter comes from the ``opt`` namespace given to the
    constructor; ``read_yaml`` offers an alternative file-based source.

    NOTE(review): the loss terms below push real samples toward a score of 0
    and generated samples toward 1 — labels are flipped relative to the
    textbook GAN objective.  The discriminator and generator losses are
    consistent with each other under that convention, so it is presumably
    intentional; confirm against the discriminator's output semantics.
    """

    def __init__(self, opt):
        """Build generator, discriminator, optimizers and the data iterator.

        Args:
            opt: configuration namespace (e.g. ``argparse.Namespace``)
                providing every hyper-parameter referenced below.
        """
        super(LSTMGanForShell, self).__init__()
        self.info = str(opt)  # full config string, echoed into the training log
        self.EPS = opt.EPS    # clamp floor used inside log() to avoid -inf

        self.generator_out_dim: int = opt.generator_out_dim
        # RMC parameters, retained for the (currently disabled) RMC generator.
        self.head_size: int = opt.head_size
        self.num_heads: int = opt.num_heads
        self.mem_slots: int = opt.mem_slots

        # Mixing weight for a (currently disabled) second, music-only
        # discriminator; unused by the active loss path.
        self.lyrics_dis_rate: float = opt.lyrics_dis_rate
        self.train_data_iterator = None
        self.sequence_len: int = opt.sequence_len
        self.train_D_steps: int = opt.train_D_steps  # D mini-batches per epoch
        self.train_G_steps: int = opt.train_G_steps  # G mini-batches per epoch
        self.total_epoch: int = opt.epochs
        self.save_per_epoch: int = opt.save_per_epoch
        # Timestamps fixed at construction; they name the run's output dirs.
        self.this_time = time.strftime("%y_%m_%d_%H_%M_%S", time.localtime())
        self.this_day = time.strftime("%y_%m_%d", time.localtime())
        # Default DataLoader config; you should change it in the YAML file.
        self.data_params: dict = {'batch_size': int(opt.batch_size),
                                  'shuffle': bool(opt.shuffle),
                                  'num_workers': int(opt.num_workers)}
        self.load_flag: bool = bool(opt.load_model)
        self.learning_rate_D: float = opt.learning_rate_D
        self.learning_rate_G: float = opt.learning_rate_G
        self.discriminator_out_dim: int = opt.discriminator_out_dim

        self.hidden_dim: int = opt.hidden_dim
        self.ff1_out: int = opt.ff1_out

        self.embed_dim: int = opt.embed_dim
        self.cuda: bool = bool(opt.cuda)
        # The generator consumes a doubled embedding per step.
        self.lyrics_dim: int = 2 * self.embed_dim
        self.discriminator_input_dim_music: int = self.generator_out_dim
        # The lyrics discriminator sees melody features concatenated with the
        # lyrics embedding.
        self.discriminator_input_dim: int = self.embed_dim + self.generator_out_dim
        self.device = torch.device('cuda:0' if self.cuda else 'cpu')

        # Active configuration: LSTM generator + lyrics discriminator, both
        # trained with plain SGD.  (An RMC generator, a music-only
        # discriminator and Adam optimizers were tried previously.)
        self.gen = GeneratorLSTM(self.lyrics_dim, self.ff1_out, self.hidden_dim, self.generator_out_dim)
        self.dis_lyrics = DiscriminatorLSTM(self.discriminator_input_dim, self.hidden_dim,
                                            self.discriminator_out_dim)

        self.optimizer_G = optim.SGD(self.gen.parameters(), lr=self.learning_rate_G)
        self.optimizer_D_lyrics = optim.SGD(self.dis_lyrics.parameters(), lr=self.learning_rate_D)
        # Unused by the active loss path, kept for external callers.
        self.criterion = LossComputeWithModule('normal')

        self.load_data(from_pth=False)

        if self.load_flag:
            self.load_model('.')  # load_model is currently a stub

        if self.cuda:
            self.gen.to(self.device)
            self.dis_lyrics.to(self.device)

    def read_yaml(self, path):
        """Overwrite the hyper-parameters from a YAML config file.

        Derived attributes (``lyrics_dim`` and the discriminator input dims)
        are recomputed from the loaded values.

        NOTE(review): ``yaml.FullLoader`` can construct arbitrary Python
        objects — only use this on trusted config files (``yaml.safe_load``
        would be safer if the config uses plain scalars only).
        """
        with open(path, 'r') as config_yaml:
            data = yaml.load(config_yaml, yaml.FullLoader)
        self.cuda = data['cuda']
        self.embed_dim = data['embed_dim']
        self.lyrics_dim = 2 * self.embed_dim
        self.ff1_out = data['ff1_out']
        self.hidden_dim = data['hidden_dim']
        self.generator_out_dim = data['generator_out_dim']
        self.discriminator_input_dim = self.embed_dim + self.generator_out_dim
        self.discriminator_input_dim_music = self.generator_out_dim
        self.discriminator_out_dim = data['discriminator_out_dim']
        self.learning_rate_G = data['learning_rate_G']
        self.learning_rate_D = data['learning_rate_D']
        self.load_flag = data['load_model']
        self.data_params = data['data_params']
        self.total_epoch = data['epochs']
        self.train_G_steps = data['train_G_steps']
        self.train_D_steps = data['train_D_steps']
        self.sequence_len = data['sequence_len']
        self.lyrics_dis_rate = data['lyrics_dis_rate']
        self.mem_slots = data['mem_slots']
        self.num_heads = data['num_heads']
        self.head_size = data['head_size']
        self.save_per_epoch = data['save_per_epoch']

    def save_model(self, epoch):
        """Persist model weights for *epoch*.  Currently a stub (no-op)."""
        pass

    def load_model(self, pth):
        """Restore model weights from *pth*.  Currently a stub (no-op)."""
        pass

    def load_data(self, from_pth=False):
        """Create ``self.train_data_iterator``.

        Args:
            from_pth: if True, load a pre-pickled iterator from disk;
                otherwise build a DataLoader from the embedding/vocabulary
                files.  NOTE(review): both branches use hard-coded relative
                paths ('../teswt_set.pth' looks like a typo) — verify.
        """
        if not from_pth:
            training_set = Dataloader('2022-01-15_18_30_13_embeddings10_vector.pt',
                                      '2022-01-15_18_30_13_vocabulary10_lookup.json',
                                      self.sequence_len)
            self.train_data_iterator = data.DataLoader(training_set, **self.data_params)
        else:
            self.train_data_iterator = torch.load('../teswt_set.pth')
        print('loaded data')

    def train_all_epochs(self, save_model_data=False):
        """Run the adversarial training schedule and write a text log.

        Per epoch: train the discriminator for up to ``train_D_steps``
        batches, then the generator for up to ``train_G_steps`` batches.
        Every ``save_per_epoch`` epochs, dump generated samples via
        ``get_gen_data``.

        Args:
            save_model_data: accepted for interface compatibility;
                currently unused.
        """
        out_data = self.info + '\n'
        save_dir = os.path.join(os.getcwd(),
                                'src/save_lstm/{}/{}/'.format(self.this_day, self.this_time))
        # NOTE(review): range(1, total_epoch) runs (total_epoch - 1) epochs;
        # preserved as-is — confirm whether epochs are meant to be 1-indexed.
        for epoch in range(1, self.total_epoch):
            print('Training epoch {} ...'.format(epoch))
            out_data += '\nTraining epoch {} ...\n'.format(epoch)

            losses_G = []

            self.dis_lyrics.train()
            self.gen.train()

            # ---- Discriminator phase -------------------------------------
            total_D_loss = 0
            # 'batch' (not 'data') so the torch.utils.data module import is
            # not shadowed.
            for num_steps_D, batch in enumerate(self.train_data_iterator):
                lyrics_seq = batch[0].to(self.device)
                discrete_val_seq = batch[1].to(self.device)

                real_D_out = self.dis_lyrics(discrete_val_seq, lyrics_seq)
                realscore = torch.mean(real_D_out)
                print('real_D_out score :{}'.format(realscore))

                # detach() so these samples never train G.
                fake_G_out = self.gen(lyrics_seq).detach()
                fake_D_out = self.dis_lyrics(fake_G_out, lyrics_seq)
                fakescore = torch.mean(fake_D_out)
                print('fake_D_out score :{}'.format(fakescore))

                # Flipped-label loss: drive real scores to 0, fake scores
                # to 1; clamp keeps log() away from -inf.
                loss_D_this_step = torch.mean(- torch.log(torch.clamp(1 - real_D_out, self.EPS, 1)) - torch.log(
                    torch.clamp(fake_D_out, self.EPS, 1)))
                self.optimizer_D_lyrics.zero_grad()
                loss_D_this_step.backward()
                self.optimizer_D_lyrics.step()

                # fix: accumulate a float via .item() (matching the G loop)
                # instead of a tensor that would retain the autograd graph.
                total_D_loss += loss_D_this_step.item()
                if num_steps_D + 1 == self.train_D_steps:
                    break

            print("Loss while training discriminator is: {}".format(total_D_loss))
            out_data += "Loss while training discriminator is: {}\n".format(total_D_loss)

            # ---- Generator phase -----------------------------------------
            total_G_loss = 0
            for num_steps_G, batch in enumerate(self.train_data_iterator):
                lyrics_seq = batch[0].to(self.device)

                fake_G_out = self.gen(lyrics_seq)
                fake_D_out = self.dis_lyrics(fake_G_out, lyrics_seq)
                # Flipped-label generator loss: drive D(fake) toward 0.
                fake_G_loss = -torch.mean(torch.log(torch.clamp(1 - fake_D_out, self.EPS, 1)))
                self.optimizer_G.zero_grad()
                fake_G_loss.backward()
                self.optimizer_G.step()

                total_G_loss += fake_G_loss.item()

                if num_steps_G + 1 == self.train_G_steps:
                    break

            losses_G.append(total_G_loss)

            print("Loss while training generator is: {}".format(total_G_loss))
            out_data += "Loss while training generator is: {}\n".format(total_G_loss)

            if (epoch + 1) % self.save_per_epoch == 0:
                # exist_ok avoids the racy exists()/makedirs() pair.
                os.makedirs(save_dir, exist_ok=True)
                self.get_gen_data(epoch + 1)
        # fix: the log directory previously existed only if a save epoch had
        # triggered; create it unconditionally before writing the log.
        os.makedirs(save_dir, exist_ok=True)
        with open(os.path.join(save_dir, 'log.txt'), 'w') as flog:
            flog.write(out_data)

    def train_gen(self, epoch: int):
        """Train only the generator for one epoch.  Currently a stub."""
        pass

    def train_dis(self, epoch: int):
        """Train only the discriminator for one epoch.  Currently a stub."""
        pass

    def get_gen_data(self, epoch, save=False):
        """Dump generated melody features for up to six batches to .npy files.

        Each saved array is the real discrete sequence concatenated with the
        generator output along the feature axis (dim 2).

        Args:
            epoch: epoch number embedded in the output file names.
            save: accepted for interface compatibility; currently unused.
        """
        self.gen.eval()
        self.dis_lyrics.eval()

        # Loop-invariant: the run's output directory.
        base_data_root = os.path.join(os.getcwd(),
                                      'src/save_lstm/{}/{}/'.format(self.this_day, self.this_time))

        with torch.no_grad():
            for i, test_batch in enumerate(self.train_data_iterator):
                lyrics_seq = test_batch[0].to(self.device)
                discrete_val_seq = test_batch[1].to(self.device)
                gen_out = self.gen(lyrics_seq)

                # fix: .cpu().numpy() — np.save cannot consume CUDA tensors
                # (no_grad is active, so no detach() is needed).
                np.save(os.path.join(base_data_root, 'epoch{}_gen_data_{}'.format(epoch, i)),
                        torch.cat((discrete_val_seq, gen_out), dim=2).cpu().numpy())

                print('save data in path {}'.format(os.path.join(base_data_root, 'epoch{}_gen_data_{}'.format(epoch, i))))

                if i == 5:
                    break