# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Custom loss functions.

This file contains custom losses used for our models.
"""
from typing import Optional

import tensorflow as tf


def vae_kl_divergence(
    z_mean: tf.Tensor,
    z_log_var: tf.Tensor,
) -> tf.Tensor:
  """Return the KL divergence term of the VAE training objective.

  Measures how far the encoder's conditional distribution — parameterized by
  `z_mean` and `z_log_var` — is from a standard gaussian prior. The analytic
  closed form for gaussians is used, summed over every element of the batch.

  Args:
    z_mean: Mean of the conditional distribution produced by the encoder.
    z_log_var: Log variance of the conditional distribution produced by the
      encoder.

  Returns:
    A scalar tensor: the summed KL divergence for the batch.
  """
  # Closed-form KL(N(mu, sigma^2) || N(0, 1)) per latent element.
  elementwise_kl = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
  return tf.reduce_sum(elementwise_kl)


def reconstruction_loss(
    real_embed: tf.Tensor,
    generated_embed: tf.Tensor,
    loss_type: str,
) -> tf.Tensor:
  """Calculate the reconstruction loss in multiple views/obfuscations.

  This function is used to calculate the quality of the reconstructed
  embeddings, using the real obfuscated embeddings as ground truth.

  Args:
    real_embed: Given embeddings from obfuscated images. This must be a
      tensor of shape (batch_size, number_of_obfuscations, embedding_size).
    generated_embed: Generated embeddings from obfuscated images. This must
      have the same shape as real_embed.
    loss_type: The embedding loss to be used. Currently only 'MSE' is
      supported; any other value raises.

  Returns:
    The embedding loss of the AutoEncoder, as a scalar tensor.

  Raises:
    ValueError: If `loss_type` is not one of the supported values.
  """
  # TODO(smyrnisg): add back contrastive loss.
  if loss_type == 'MSE':
    # mean_squared_error reduces over the last (embedding) axis; reduce_mean
    # then averages over the remaining batch/obfuscation axes.
    embed_loss = tf.reduce_mean(
        tf.keras.metrics.mean_squared_error(real_embed, generated_embed)
    )
  else:
    raise ValueError(f'Embedding loss type not understood: {loss_type}')

  return embed_loss


def weighted_crossentropy_loss(
    labels: tf.Tensor,
    real_logits: tf.Tensor,
    generated_logits: tf.Tensor,
    gen_loss_weight: float,
    gen_labels: Optional[tf.Tensor] = None
) -> tf.Tensor:
  """Calculate a paired cross-entropy loss which includes a weight factor.

  This function calculates a paired cross-entropy loss, using logits derived
  from both real and generated embeddings. The total loss is a sum of the
  two crossentropy losses between the logits of the real embeddings and the
  labels, and the logits between the generated logits and the labels. Since the
  latter are trained jointly with the classifier, their loss is scaled by a
  weight factor. This weight factor should ramp up during training (so the loss
  due to the generated embeddings isn't as important in the beginning).

  Args:
    labels: The labels for the given batch.
    real_logits: The logits the classifier provides for the real embeddings.
    generated_logits: The logits the classifier provides for the generated
      embeddings.
    gen_loss_weight: The factor with which to multiply the loss between
      generated_logits and labels. May also be a scalar tensor.
    gen_labels: Optional labels for extra generated samples. When provided,
      these are used in place of `labels` for the generated-logits term.

  Returns:
    The paired crossentropy loss as described, as a scalar tensor.
  """
  loss_real = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=real_logits
  )
  loss_gen = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=gen_labels if gen_labels is not None else labels,
      logits=generated_logits,
  )
  # Concatenating before the mean averages over all (real + generated)
  # examples jointly, rather than averaging the two terms separately.
  total_loss = tf.reduce_mean(tf.concat(
      [loss_real, gen_loss_weight * loss_gen], axis=0
  ))
  return total_loss
