import torch
import torch.nn as nn
from comp.DiscriminatorLSTMFrans import DiscriminatorLSTMFrans as DiscriminatorLSTM
import os
import yaml
from comp.DataloadernoCon import DataloadernoCon as Dataloader
from torch import optim
from torch.utils import data as torchdata


class LossCompute(object):
    """Thin callable wrapper around binary cross-entropy loss."""

    def __init__(self):
        # BCE expects predictions already squashed into (0, 1)
        # (i.e. the discriminator ends in a sigmoid) and binary targets.
        self.criterion = nn.BCELoss()

    def __call__(self, x, y):
        """
        Compute the BCE loss between a prediction and its target.
        :param x: predicted value
        :param y: actual value
        :return: scalar loss tensor (mean reduction)
        """
        return self.criterion(x, y)

class TrainDiscri(nn.Module):
    """Train DiscriminatorLSTM on real lyric/melody pairs only.

    Hyper-parameters are read from ./config.yaml; the dataset file names are
    hard-coded below. Checkpoints are written under save_disc/ every 5 epochs.
    (The fake-sample branch of GAN training is commented out in train_dis.)
    """

    def __init__(self):
        super(TrainDiscri, self).__init__()
        with open('./config.yaml', 'r') as config_yaml:
            data = yaml.load(config_yaml, yaml.FullLoader)
        self.epoch = 25  # number of training epochs (not read from config)
        self.data_params = data['data_params']
        self.sequence_len = data['sequence_len']
        self.hidden_dim = data['hidden_dim']
        self.generator_out_dim = data['generator_out_dim']
        self.learning_rate_D = data['learning_rate_D']
        self.embed_dim = data['embed_dim']
        # Discriminator input is the lyric embedding concatenated with the
        # generator-side features, hence the sum of the two dims.
        self.discriminator_input_dim = self.embed_dim + self.generator_out_dim

        self.discriminator_out_dim = data['discriminator_out_dim']
        self.dis_lyrics = DiscriminatorLSTM(self.discriminator_input_dim, self.hidden_dim,
                                            self.discriminator_out_dim)
        self.train_D_steps = data['train_D_steps']
        self.criterion = LossCompute()
        self.device = 'cpu'

        self.optimizer_D_lyrics = optim.Adam(self.dis_lyrics.parameters(), lr=self.learning_rate_D)
        training_set = Dataloader('2022-01-15_18_30_13_embeddings10_vector.pt',
                                  '2022-01-15_18_30_13_vocabulary10_lookup.json',
                                  self.sequence_len)
        self.train_data_iterator = torchdata.DataLoader(training_set, **self.data_params)
        print('loaded data')

    def train_dis(self):
        """Run the discriminator training loop over self.epoch epochs.

        NOTE(review): the inner loop breaks after the first batch of every
        epoch (num_steps_D == 0), so only one optimizer step runs per epoch.
        This looks like leftover debug code — confirm before removing the
        break, as removing it changes how much data is trained on.
        """
        for e in range(self.epoch):
            print('Training epoch {} ...'.format(e))
            self.dis_lyrics.train()
            total_loss = 0.0
            for num_steps_D, batch in enumerate(self.train_data_iterator):
                # presumably batch = (lyric embeddings, discrete melody values)
                # — TODO confirm against DataloadernoCon
                lyrics_seq = batch[0].to(self.device)
                discrete_val_seq = batch[1].to(self.device)

                real_D_out = self.dis_lyrics(discrete_val_seq, lyrics_seq)
                # false_D_out=self.dis_lyrics(discrete_val_seq,torch.randn(lyrics_seq.size()))
                # Real samples are labelled 1. Build the target on the same
                # device as the prediction so this also works off-CPU
                # (previously torch.ones defaulted to CPU).
                target = torch.ones(real_D_out.size(), device=real_D_out.device)
                loss_true = self.criterion(real_D_out, target)
                # loss_false=self.criterion(false_D_out,torch.ones(false_D_out.size()))
                loss = loss_true
                self.optimizer_D_lyrics.zero_grad()
                loss.backward()
                self.optimizer_D_lyrics.step()

                if num_steps_D == 0:
                    print('loss this epoch{} with step{} is : {}'.format(e, num_steps_D + 1, loss.item()))
                    # .item() detaches the scalar: previously total_loss held a
                    # live autograd tensor, retaining the graph and producing a
                    # "tensor(...)" string in the checkpoint filename.
                    total_loss += loss.item()
                    break
            if (e + 1) % 5 == 0:
                self.save_model(e + 1, total_loss)

        print('end')

    def save_model(self, epoch, loss):
        """Persist the discriminator weights under save_disc/.

        :param epoch: 1-based epoch number (caller already passes e+1)
        :param loss: accumulated loss, embedded in the filename
        """
        # Bug fix: the filename previously used epoch+1 although the caller
        # already passed e+1, so the saved name was off by one versus the log.
        torch.save(self.dis_lyrics.state_dict(),
                   'save_disc/disc_len10_epoch{}_loss{}'.format(epoch, loss))
        print('disc_len{}_size{}_epoch{} saved!'.format(self.sequence_len, self.data_params['batch_size'], epoch))

if __name__ == '__main__':
    # Build the trainer (loads config + data) and run discriminator training.
    trainer = TrainDiscri()
    trainer.train_dis()



