#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/12/8 22:59
# @Author : xjp
# @Site : 
# @File : WGAN-GP.py
# @Software: PyCharm
import tensorflow as tf
def gradient_penalty(Discriminator, batch_x, fake_number):
    """
    Compute the WGAN-GP gradient penalty term (Gulrajani et al., 2017).

    Draws one random interpolation coefficient per sample, linearly
    interpolates between real and fake samples, and penalizes the
    deviation of the critic's gradient norm (w.r.t. the interpolates)
    from 1.

    @param Discriminator: critic network; callable mapping a batch of
        samples to logits
    @param batch_x: real samples, shape [b, h, w, c]
    @param fake_number: generated samples, same shape as batch_x
    @return: scalar gradient-penalty tensor
    """
    batchsz = batch_x.shape[0]
    # One coefficient per sample, shape [b,1,1,1], so it broadcasts over
    # any spatial/channel size. (The original hard-coded a trailing dim
    # of 30, which only worked for 30-channel inputs and drew a separate
    # t per channel — not the published algorithm.)
    t = tf.random.uniform([batchsz, 1, 1, 1])
    # Expand to the sample shape: [b,1,1,1] => [b,h,w,c]
    t = tf.broadcast_to(t, batch_x.shape)
    # Linear interpolation between real and fake samples
    interplate = t * batch_x + (1 - t) * fake_number
    # Record the critic's forward pass on the interpolated samples
    with tf.GradientTape() as tape:
        tape.watch([interplate])
        d_interplote_logits = Discriminator(interplate)
    # Gradient of the critic output w.r.t. the interpolated input;
    # computed outside the tape context so the gradient op itself is
    # not recorded on the tape.
    grads = tape.gradient(d_interplote_logits, interplate)
    # Per-sample gradient norm: [b,h,w,c] => [b,-1] => [b]
    grads = tf.reshape(grads, [grads.shape[0], -1])
    gp = tf.norm(grads, axis=1)
    # Penalize deviation of the norm from 1
    gp = tf.reduce_mean((gp - 1.) ** 2)
    return gp
def d_loss_fn(generator, discriminator, batch_z, batch_x, is_training, gp_weight=10.):
    """
    WGAN-GP discriminator (critic) loss.

    loss = E[D(fake)] - E[D(real)] + gp_weight * gradient_penalty

    @param generator: generator network (callable)
    @param discriminator: critic network (callable)
    @param batch_z: batch of latent noise vectors
    @param batch_x: batch of real samples
    @param is_training: training-mode flag passed to both networks
    @param gp_weight: coefficient of the gradient-penalty term
        (default 10, the value recommended in the WGAN-GP paper;
        previously hard-coded)
    @return: (loss, gp) — scalar critic loss and the raw penalty term
    """
    fake_number = generator(batch_z, is_training)                 # fake samples
    d_fake_logits = discriminator(fake_number, is_training)       # critic on fakes
    d_real_logits = discriminator(batch_x, is_training)           # critic on reals
    # Gradient penalty on real/fake interpolates
    gp = gradient_penalty(discriminator, batch_x, fake_number)
    loss = tf.reduce_mean(d_fake_logits) - tf.reduce_mean(d_real_logits) + gp_weight * gp
    return loss, gp
def g_loss_fn(generator, discriminator, batch_z, is_training):
    """
    WGAN generator loss.

    Maximizes the critic's score on generated samples, i.e. minimizes
    the negated mean of the fake logits.

    @param generator: generator network (callable)
    @param discriminator: critic network (callable)
    @param batch_z: batch of latent noise vectors
    @param is_training: training-mode flag passed to both networks
    @return: scalar generator loss
    """
    generated = generator(batch_z, is_training)
    critic_scores = discriminator(generated, is_training)
    return -tf.reduce_mean(critic_scores)