from VGG19 import Vgg19
from DINet import DINet
from Syncnet import SyncDiscriminator as Discriminator
from sync_batchnorm import convert_model
from torch.utils.data import DataLoader

import random, os ,numpy as np

from torch.optim import lr_scheduler
import torch.nn.functional as F
import torch.optim as optim
import torch.nn as nn
import torch
# Optional project dependency: fall back to a plain nn.Module base when the
# ezds training framework is not installed.  Catch only ImportError so real
# bugs inside ezds (or Ctrl-C during import) are not silently swallowed.
try:
    from ezds.ezdlearn.base import BaseTrainer
except ImportError:
    BaseTrainer = nn.Module

class GANLoss(nn.Module):
    """Adversarial objective that scores discriminator outputs against
    constant real/fake targets.

    With ``use_lsgan=True`` (default) the criterion is least-squares
    (MSE); otherwise vanilla BCE is used.
    """
    def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0):
        super(GANLoss, self).__init__()
        # Buffers move with the module across .to()/.cuda() but are not trained.
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()

    def get_target_tensor(self, input, target_is_real):
        """Broadcast the constant real/fake label to the shape of *input*."""
        label = self.real_label if target_is_real else self.fake_label
        return label.expand_as(input)

    def forward(self, input, target_is_real):
        """Return the loss of *input* against the chosen constant target."""
        return self.loss(input, self.get_target_tensor(input, target_is_real))

class Trainer(BaseTrainer):
    """Adversarial trainer for DINet lip-sync generation.

    A single optimizer drives both networks; ``self.d_phase`` toggles on
    every ``forward()`` call so generator and discriminator updates
    alternate step by step.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = opt = config
        self.net_g = DINet(config).cuda()
        # Freeze the whole generator, then re-enable training for the
        # audio encoder only.
        self.net_g.no_grad_eval()
        self.net_g.audio_encoder.train()
        for p in self.net_g.audio_encoder.parameters():
            p.requires_grad = True
        # -----------------------------------
        self.net_d = Discriminator(config).cuda()
        self.net_vgg = Vgg19().cuda().eval()  # fixed perceptual-feature extractor
        # One optimizer, separate param groups / learning rates for G and D.
        self.optimizer = optim.Adam([
                        {"params":      [p for p in self.net_g.parameters() if p.requires_grad], 
                         "lr":          opt.lr_g,
                         "betas":       (0.5, 0.999)},
                        {"params":      [p for p in self.net_d.parameters() if p.requires_grad], 
                         "lr":          opt.lr_dI,
                         "betas":       (0.5, 0.999)},
                        ])  
        self.criterionGAN = GANLoss().cuda()
        self.criterionL1 = nn.L1Loss().cuda()
        self.criterionMSE = nn.MSELoss().cuda()
        # False -> generator phase first; flipped at the end of each forward().
        self.d_phase = False

    def save(self, path):
        """Save G and D to 'G_<name>' / 'D_<name>' beside *path*."""
        directory = os.path.dirname(path)
        basename = os.path.basename(path)
        self.net_g.save(os.path.join(directory, 'G_' + basename))
        self.net_d.save(os.path.join(directory, 'D_' + basename))

    def load(self, path):
        """Load G and D from 'G_<name>' / 'D_<name>' beside *path*."""
        directory = os.path.dirname(path)
        basename = os.path.basename(path)
        self.net_g.load(os.path.join(directory, 'G_' + basename))
        self.net_d.load(os.path.join(directory, 'D_' + basename))

    def forward_d(self, pred_real_d, pred_fake_d):
        """Discriminator GAN losses: fake scored toward 0, real toward 1."""
        loss_d_fake = self.criterionGAN(pred_fake_d, False)
        loss_d_real = self.criterionGAN(pred_real_d, True)
        return loss_d_fake, loss_d_real

    def forward_g(self, pred_fake_d):
        """Generator GAN loss: fake output should be scored as real."""
        loss_g_d = self.criterionGAN(pred_fake_d, True)
        return loss_g_d

    def forward_p(self, fake_out, source_clip, deep_speech_full):
        """VGG perceptual L1 losses at full and half resolution.

        NOTE(review): ``deep_speech_full`` is unused; kept for call-site
        compatibility.
        """
        fake_out_half = F.avg_pool2d(fake_out, 3, 2, 1, count_include_pad=False)
        source_clip_half = F.interpolate(source_clip, scale_factor=0.5, mode='bilinear')
        loss_p = 0.
        loss_p_half = 0.
        perception_real = self.net_vgg(source_clip)
        perception_fake = self.net_vgg(fake_out)
        perception_real_half = self.net_vgg(source_clip_half)
        perception_fake_half = self.net_vgg(fake_out_half)
        # Accumulate L1 over each VGG feature level; real features are
        # detached so only the generator side receives gradients.
        for i in range(len(perception_real)):
            loss_p += self.criterionL1(perception_fake[i], perception_real[i].detach())
            loss_p_half += self.criterionL1(perception_fake_half[i], perception_real_half[i].detach())
        return loss_p, loss_p_half

    def forward(self, *data):
        """One alternating training step.

        Expects (source_frame, source_clip_mask, reference_clip,
        audio_feature_clip, audio_feature_full).  5-D video tensors
        (bs, n, ch, h, w) are flattened to batches of frames; the
        "clip" view stacks the n frames along channels for the
        discriminator.  Returns scalar metrics plus the weighted losses
        for the current phase.
        """
        opt = self.config
        data = [d.to(self.device) for d in data]
        source_frame, source_clip_mask, reference_clip, audio_feature_clip, audio_feature_full = data
        real_tensor = torch.tensor(1.0).cuda()
        if len(source_frame.shape) == 4:
            bs, ch, h, w = source_frame.shape
            # Single-frame input: the clip view is the frame itself.
            # (Bug fix: source_clip was never bound on this path, so the
            # net_d call below raised NameError for 4-D input.)
            source_clip = source_frame
        elif len(source_frame.shape) == 5:
            bs, n, ch, h, w = source_frame.shape
            source_frame = source_frame.reshape(bs*n, ch, h, w)
            source_clip = source_frame.reshape(bs, n*ch, h, w)
            source_clip_mask = source_clip_mask.reshape(bs*n, ch, h, w)
            reference_clip = reference_clip.reshape(bs*n, -1, h, w)
            _, _, fs, ln = audio_feature_clip.shape
            audio_feature_clip = audio_feature_clip.reshape(bs*n, fs, ln)


        fake_out = self.net_g(source_clip_mask, reference_clip, audio_feature_clip)
        pred_fake_d, score_fake_s = self.net_d(fake_out.reshape(*source_clip.shape), audio_feature_full, discrime=self.d_phase)
        pred_real_d, score_real_s = self.net_d(source_clip, audio_feature_full, discrime=self.d_phase)

        if self.d_phase:
            # Discriminator phase: D/sync losses carry gradients; the
            # generator-side terms are computed only for logging.
            loss_d_fake, loss_d_real = self.forward_d(pred_real_d, pred_fake_d)
            loss_d = (loss_d_fake + loss_d_real) * 0.5
            loss_sync = self.criterionMSE(score_real_s, real_tensor.expand_as(score_real_s))
            with torch.no_grad():
                loss_g = self.forward_g(pred_fake_d)
                loss_p, loss_p_half = self.forward_p(fake_out, source_frame, audio_feature_full)
        else:
            # Generator phase: mirror image of the branch above.
            with torch.no_grad():
                loss_d_fake, loss_d_real = self.forward_d(pred_real_d, pred_fake_d)
                loss_d = (loss_d_fake + loss_d_real) * 0.5
            loss_g = self.forward_g(pred_fake_d)
            loss_p, loss_p_half = self.forward_p(fake_out, source_frame, audio_feature_full)
            # Bug fix: target is expanded to match the tensor actually
            # scored (was expand_as(score_real_s)).
            loss_sync = self.criterionMSE(score_fake_s, real_tensor.expand_as(score_fake_s))

        self.d_phase = not self.d_phase

        return {
            "metric/perceptual_loss": float(loss_p),
            "metric/perceptual_half": float(loss_p_half),
            "metric/disc_loss_fake" : float(loss_d_fake),
            "metric/disc_loss_real" : float(loss_d_real),
            # Bug fix: these two labels were swapped against their values.
            "metric/syncscore_fake" : float(score_fake_s.mean()),
            "metric/syncscore_real" : float(score_real_s.mean()),
            "loss/gen_loss"         : loss_g,
            "loss/perceptual_loss"  : (loss_p + loss_p_half) * opt.lamb_perception,
            "loss/disc_loss"        : loss_d,
            "loss/sync_loss"        : loss_sync * opt.lamb_syncnet_perception,
        }

    def run_summary_task(self, data=None):
        """Every 5*rec_per_iter iterations, log a source/generated image
        pair to TensorBoard, then delegate to the base class.

        NOTE(review): unpacking ``data`` below fails if it is None when
        the logging interval fires — callers appear to always pass a
        batch; confirm.
        """
        recit = self.iternum - self.beginit
        if self.writer is None:
            return
        if recit % (5 * self.config.rec_per_iter) == 0:
            with torch.no_grad():
                source_clip, source_clip_mask, reference_clip, deep_speech_clip, deep_speech_full = data
                if len(source_clip.shape) == 5:
                    bs, n, ch, h, w = source_clip.shape
                    source_clip = source_clip.reshape(bs*n, ch, h, w)
                    source_clip_mask = source_clip_mask.reshape(bs*n, ch, h, w)
                    reference_clip = reference_clip.reshape(bs*n, -1, h, w)
                    _, _, fs, ln = deep_speech_clip.shape
                    deep_speech_clip = deep_speech_clip.reshape(bs*n, fs, ln)
                if len(source_clip.shape) == 4:
                    # Only visualize the first sample to keep logs small.
                    source_clip = source_clip[:1, ...]
                    source_clip_mask = source_clip_mask[:1, ...]
                    reference_clip = reference_clip[:1, ...]
                    deep_speech_clip = deep_speech_clip[:1, ...]
                fake_out = self.net_g(source_clip_mask.to(self.device), 
                                    reference_clip.to(self.device), 
                                    deep_speech_clip.to(self.device))
                self.writer.add_image('source_clip', source_clip[0], self.iternum, dataformats='CHW')
                self.writer.add_image('fake_out', fake_out[0], self.iternum, dataformats='CHW')
        super().run_summary_task(data)
    

#%%
if __name__ == "__main__":
    # Smoke test: load the config, build the trainer, and run a handful
    # of training steps on the clip dataset.
    from ezds.ezdlearn.utils import visual as vt
    from ezds.ezdlearn.config import load_config
    from clip_dataset import DINetDataset

    config = load_config('configs/DINet_frame.yaml')
    trainer = Trainer(config)
    trainer.load_auto()

    dset = DINetDataset(config, is_train=True)
    # DataLoader is already imported at module level; the duplicate
    # in-block import was removed.
    loader = DataLoader(dset, batch_size=1, shuffle=True, num_workers=0)
    ress = []  # keep step outputs around for interactive inspection
    for idx, data in enumerate(loader):
        res = trainer._train_step(*data)
        ress.append(res)
        if idx == 10:
            break