# -*- coding: utf-8 -*-
"""
# @file name    : loss_function_1.py
# @author       : QuZhang
# @data         : 2020-12-17 18:44
# @brief        : 1. nn.CrossEntropyLoss
                  2. nn.NLLoss
                  3. BCELoss
                  3. BCEWithLogitsLoss
"""
import torch
import torch.nn as nn
import numpy as np

def ce_reduction_demo(inputs, target):
    """CrossEntropyLoss with the three reduction modes.

    Args:
        inputs: (N, C) float tensor of raw class scores (logits).
        target: (N,) long tensor of correct class indices.

    Returns:
        (loss_none, loss_sum, loss_mean) tensors.
    """
    # reduction selects how per-sample losses are aggregated
    loss_f_none = nn.CrossEntropyLoss(weight=None, reduction='none')  # one loss per sample
    loss_f_sum = nn.CrossEntropyLoss(weight=None, reduction='sum')    # sum over samples
    loss_f_mean = nn.CrossEntropyLoss(weight=None, reduction='mean')  # mean over samples

    loss_none = loss_f_none(inputs, target)
    loss_sum = loss_f_sum(inputs, target)
    loss_mean = loss_f_mean(inputs, target)

    print("Cross Entropy Loss:\n{}\n{}\n{}".format(loss_none, loss_sum, loss_mean))
    return loss_none, loss_sum, loss_mean


def ce_by_hand(inputs, target, idx=0):
    """Recompute the un-reduced cross-entropy of sample ``idx`` with numpy.

    CE = -x[y] + log(sum_j exp(x[j])) for true class y.
    """
    input_i = inputs.detach().numpy()[idx]
    target_i = target.numpy()[idx]

    x_class = input_i[target_i]  # score of the correct class, picked by index
    log_sum_exp = np.log(np.sum(np.exp(input_i)))
    loss_i = -x_class + log_sum_exp
    print("第一个样本的loss: ", loss_i)
    return loss_i


def ce_weighted_demo(inputs, target, weights):
    """CrossEntropyLoss with per-class weights, all three reductions.

    weights[c] scales the loss of every sample whose true class is c;
    the 'mean' reduction divides by the sum of the applied weights,
    not by the sample count.
    """
    loss_f_none_w = nn.CrossEntropyLoss(weight=weights, reduction='none')
    loss_f_sum_w = nn.CrossEntropyLoss(weight=weights, reduction='sum')
    loss_f_mean_w = nn.CrossEntropyLoss(weight=weights, reduction='mean')

    loss_none_w = loss_f_none_w(inputs, target)
    loss_sum_w = loss_f_sum_w(inputs, target)
    loss_mean_w = loss_f_mean_w(inputs, target)

    print('\nweights: ', weights)
    print(loss_none_w, loss_sum_w, loss_mean_w)
    return loss_none_w, loss_sum_w, loss_mean_w


def ce_weighted_mean_by_hand(inputs, target, weights):
    """Recompute the weighted 'mean' reduction by hand.

    BUG FIX: the original read ``loss_none`` produced inside a *different*
    flag-guarded section, so running this section alone raised NameError;
    the unweighted per-sample losses are now computed locally.
    """
    # unweighted per-sample losses, same as reduction='none' without weights
    loss_sep = nn.CrossEntropyLoss(weight=None, reduction='none')(inputs, target).detach().numpy()

    weights_np = weights.numpy()
    target_np = target.numpy()
    # Denominator: sum of each sample's true-class weight,
    # e.g. target [0, 1, 1] with weights [1, 2] -> 1 + 2 + 2 = 5.
    weights_all = np.sum(weights_np[target_np])
    print("weights_all: ", weights_all)

    tmp = 0
    for i in range(target.shape[0]):
        # scale each sample's loss by the weight of its true class
        tmp += loss_sep[i] * weights_np[target_np[i]]
    mean = tmp / weights_all
    print(mean)
    return mean


def bce_demo(inputs, target):
    """BCELoss with the three reductions.

    Args:
        inputs: (N, C) float tensor of raw scores; mapped through sigmoid
                because BCELoss expects probabilities in (0, 1).
        target: (N, C) float tensor of 0/1 labels, one per element.

    Returns:
        (loss_none, loss_sum, loss_mean) tensors.
    """
    probs = torch.sigmoid(inputs)  # BCELoss requires inputs in (0, 1)

    loss_f_none = nn.BCELoss(reduction='none')
    loss_f_sum = nn.BCELoss(reduction='sum')
    loss_f_mean = nn.BCELoss(reduction='mean')

    loss_none = loss_f_none(probs, target)
    loss_sum = loss_f_sum(probs, target)
    loss_mean = loss_f_mean(probs, target)

    print(loss_none, loss_sum, loss_mean)
    return loss_none, loss_sum, loss_mean


def bce_by_hand(inputs, target, idx=0):
    """Recompute one element's BCE with numpy.

    BUG FIX: the original relied on ``inputs`` having been sigmoid-ed by the
    previous flag-guarded section, so running this section alone computed
    log() of raw logits; the sigmoid is now applied locally.
    """
    probs = torch.sigmoid(inputs)
    x_i = probs.detach().numpy()[idx, idx]
    y_i = target.numpy()[idx, idx]

    # BCE = -[y*log(x) + (1-y)*log(1-x)]; only one term is non-zero for y in {0, 1}
    l_i = -y_i * np.log(x_i) if y_i else -(1 - y_i) * np.log(1 - x_i)
    print("BCE inputs: ", probs)
    print("第一个loss为：", l_i)
    return l_i


def bce_with_logits_demo(inputs, target):
    """BCEWithLogitsLoss with the three reductions.

    Takes raw logits directly: the sigmoid is fused into the loss
    (numerically more stable than sigmoid + BCELoss).
    """
    loss_f_none = nn.BCEWithLogitsLoss(reduction='none')
    loss_f_sum = nn.BCEWithLogitsLoss(reduction='sum')
    loss_f_mean = nn.BCEWithLogitsLoss(reduction='mean')

    loss_none = loss_f_none(inputs, target)
    loss_sum = loss_f_sum(inputs, target)
    loss_mean = loss_f_mean(inputs, target)

    print(loss_none, loss_sum, loss_mean)
    return loss_none, loss_sum, loss_mean


if __name__ == "__main__":
    # Fake multi-class data: 3 samples, 2 classes.
    ce_inputs = torch.tensor([[1, 2], [1, 3], [1, 3]], dtype=torch.float)  # raw scores
    ce_target = torch.tensor([0, 1, 1], dtype=torch.long)  # true class per sample
    # class 0 -> weight 1; class 1 -> weight 2
    class_weights = torch.tensor([1, 2], dtype=torch.float)

    # Fake multi-label data: 4 samples, 2 independent binary labels each.
    bce_inputs = torch.tensor([[1, 2], [2, 2], [3, 4], [4, 5]], dtype=torch.float)
    bce_target = torch.tensor([[1, 0], [1, 0], [0, 1], [0, 1]], dtype=torch.float)

    # Flip a flag to True to run that section (matches the original script,
    # where only the final BCEWithLogitsLoss section was enabled).
    if False:
        ce_reduction_demo(ce_inputs, ce_target)
    if False:
        ce_by_hand(ce_inputs, ce_target)
    if False:
        ce_weighted_demo(ce_inputs, ce_target, class_weights)
    if False:
        ce_weighted_mean_by_hand(ce_inputs, ce_target, class_weights)
    if False:
        bce_demo(bce_inputs, bce_target)
    if False:
        bce_by_hand(bce_inputs, bce_target)
    bce_with_logits_demo(bce_inputs, bce_target)
