import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
# Configure matplotlib fonts so the CJK plot titles/labels below render correctly
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with these fonts

def mean_squared_error(y_true, y_pred):
    """Mean squared error: the average of the squared residuals.

    Both arguments are converted to float32 tensors; scalar (0-d) inputs
    are promoted to rank 1 before the reduction.
    """
    y_true = tf.convert_to_tensor(y_true, dtype=tf.float32)
    y_pred = tf.convert_to_tensor(y_pred, dtype=tf.float32)

    # Give 0-d (scalar) tensors a leading axis.
    y_true = tf.expand_dims(y_true, 0) if y_true.shape.ndims == 0 else y_true
    y_pred = tf.expand_dims(y_pred, 0) if y_pred.shape.ndims == 0 else y_pred

    residual = y_true - y_pred
    return tf.reduce_mean(residual * residual)


def mean_absolute_error(y_true, y_pred):
    """Mean absolute error: the average of |y_true - y_pred|.

    Inputs are converted to float32 tensors; scalar (0-d) inputs are
    promoted to rank 1 before the reduction.
    """
    y_true = tf.convert_to_tensor(y_true, dtype=tf.float32)
    y_pred = tf.convert_to_tensor(y_pred, dtype=tf.float32)

    # Give 0-d (scalar) tensors a leading axis.
    y_true = tf.expand_dims(y_true, 0) if y_true.shape.ndims == 0 else y_true
    y_pred = tf.expand_dims(y_pred, 0) if y_pred.shape.ndims == 0 else y_pred

    # |a - b| == |b - a|, so the operand order does not matter here.
    return tf.reduce_mean(tf.abs(y_pred - y_true))


def binary_crossentropy(y_true, y_pred):
    """Binary cross-entropy loss for probabilistic binary targets.

    Predictions are clipped into (epsilon, 1 - epsilon) so both log terms
    stay finite; scalar (0-d) inputs are promoted to rank 1.
    """
    y_true = tf.convert_to_tensor(y_true, dtype=tf.float32)
    y_pred = tf.convert_to_tensor(y_pred, dtype=tf.float32)

    # Keep predictions strictly inside (0, 1) to avoid log(0).
    eps = tf.keras.backend.epsilon()
    y_pred = tf.clip_by_value(y_pred, eps, 1.0 - eps)

    # Give 0-d (scalar) tensors a leading axis.
    y_true = tf.expand_dims(y_true, 0) if y_true.shape.ndims == 0 else y_true
    y_pred = tf.expand_dims(y_pred, 0) if y_pred.shape.ndims == 0 else y_pred

    log_likelihood = y_true * tf.math.log(y_pred) + (1 - y_true) * tf.math.log(1 - y_pred)
    return -tf.reduce_mean(log_likelihood)


def huber_loss(y_true, y_pred, delta=1.0):
    """Huber loss: quadratic for errors below `delta`, linear beyond it.

    Inputs are converted to float32 tensors; scalar (0-d) inputs are
    promoted to rank 1 before the reduction.
    """
    y_true = tf.convert_to_tensor(y_true, dtype=tf.float32)
    y_pred = tf.convert_to_tensor(y_pred, dtype=tf.float32)

    # Give 0-d (scalar) tensors a leading axis.
    y_true = tf.expand_dims(y_true, 0) if y_true.shape.ndims == 0 else y_true
    y_pred = tf.expand_dims(y_pred, 0) if y_pred.shape.ndims == 0 else y_pred

    abs_error = tf.abs(y_true - y_pred)
    # Split each error into its quadratic part (capped at delta) and the
    # remaining linear part.
    quadratic_part = tf.minimum(abs_error, delta)
    linear_part = abs_error - quadratic_part
    return tf.reduce_mean(0.5 * tf.square(quadratic_part) + delta * linear_part)


def categorical_crossentropy(y_true, y_pred):
    """Categorical cross-entropy: mean over samples of -sum(p * log(q)).

    The sum runs over the last axis (the class axis). Predictions are
    clipped into (epsilon, 1 - epsilon) so the log stays finite; scalar
    (0-d) inputs are promoted to rank 1.
    """
    y_true = tf.convert_to_tensor(y_true, dtype=tf.float32)
    y_pred = tf.convert_to_tensor(y_pred, dtype=tf.float32)

    # Keep predictions strictly inside (0, 1) to avoid log(0).
    eps = tf.keras.backend.epsilon()
    y_pred = tf.clip_by_value(y_pred, eps, 1.0 - eps)

    # Give 0-d (scalar) tensors a leading axis (simple-case handling).
    y_true = tf.expand_dims(y_true, 0) if y_true.shape.ndims == 0 else y_true
    y_pred = tf.expand_dims(y_pred, 0) if y_pred.shape.ndims == 0 else y_pred

    per_sample = tf.reduce_sum(y_true * tf.math.log(y_pred), axis=-1)
    return -tf.reduce_mean(per_sample)


def visualize_loss_functions():
    """Plot the common loss functions as 2D curves on a 2x3 grid.

    For the regression losses (MSE/MAE/Huber) the true value is fixed at 0
    and the prediction sweeps [-3, 3]. The cross-entropy panels sweep the
    predicted probability over (0, 1) instead, since those losses are only
    defined for probabilities. The last panel overlays MSE and Huber.
    """
    # Prediction range for the regression losses; true value fixed at 0.
    y_pred = np.linspace(-3, 3, 100)

    fig, axes = plt.subplots(2, 3, figsize=(15, 10))
    fig.suptitle('常见损失函数可视化', fontsize=16)

    def _plot(ax, x, y, style, title, xlabel):
        """Draw one loss curve with the shared axis styling."""
        ax.plot(x, y, style, linewidth=2)
        ax.set_title(title)
        ax.set_xlabel(xlabel)
        ax.set_ylabel('损失')
        ax.grid(True, alpha=0.3)

    # 1. Mean squared error
    mse_losses = [float(mean_squared_error(0.0, pred)) for pred in y_pred]
    _plot(axes[0, 0], y_pred, mse_losses, 'b-', '均方误差 (Mean Squared Error)', '预测值')

    # 2. Mean absolute error
    mae_losses = [float(mean_absolute_error(0.0, pred)) for pred in y_pred]
    _plot(axes[0, 1], y_pred, mae_losses, 'r-', '平均绝对误差 (Mean Absolute Error)', '预测值')

    # 3. Huber loss (default delta=1.0)
    huber_losses = [float(huber_loss(0.0, pred)) for pred in y_pred]
    _plot(axes[0, 2], y_pred, huber_losses, 'g-', 'Huber损失', '预测值')

    # 4. Binary cross-entropy with the true value fixed at 0.5; predictions
    # stay inside (0, 1) to keep log() finite.
    y_pred_bce = np.linspace(0.01, 0.99, 100)
    bce_losses = [float(binary_crossentropy(0.5, pred)) for pred in y_pred_bce]
    _plot(axes[1, 0], y_pred_bce, bce_losses, 'm-', '二元交叉熵 (Binary Crossentropy)', '预测值')

    # 5. Categorical cross-entropy over a 2-class distribution with true
    # labels [0.3, 0.7]: the first class probability x varies and the second
    # is 1 - x so the prediction always sums to 1.
    y_pred_x = np.linspace(0.01, 0.99, 100)
    cce_losses = [float(categorical_crossentropy([0.3, 0.7], [x, 1 - x])) for x in y_pred_x]
    _plot(axes[1, 1], y_pred_x, cce_losses, 'c-', '分类交叉熵 (Categorical Crossentropy)', '第一个类别的预测概率')

    # 6. MSE vs Huber overlay: Huber grows linearly for large errors, so it
    # is less sensitive to outliers than MSE.
    axes[1, 2].plot(y_pred, mse_losses, 'b-', linewidth=2, label='MSE')
    axes[1, 2].plot(y_pred, huber_losses, 'g-', linewidth=2, label='Huber')
    axes[1, 2].set_title('MSE vs Huber损失对比')
    axes[1, 2].set_xlabel('预测值')
    axes[1, 2].set_ylabel('损失')
    axes[1, 2].legend()
    axes[1, 2].grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()


def visualize_loss_3d():
    """Render the MSE loss over (y_true, y_pred) as a 3D surface and a contour plot."""
    # Same 50-point grid over [-2, 2] for both the true and predicted value.
    axis_values = np.linspace(-2, 2, 50)
    Y_true, Y_pred = np.meshgrid(axis_values, axis_values)

    # MSE loss at every grid point.
    mse_loss = np.square(Y_pred - Y_true)

    fig = plt.figure(figsize=(12, 5))

    # Left panel: 3D surface of the loss.
    ax_surface = fig.add_subplot(121, projection='3d')
    surface = ax_surface.plot_surface(Y_true, Y_pred, mse_loss, cmap='viridis', alpha=0.8)
    ax_surface.set_xlabel('真实值')
    ax_surface.set_ylabel('预测值')
    ax_surface.set_zlabel('MSE损失')
    ax_surface.set_title('MSE损失函数 (3D)')
    fig.colorbar(surface, ax=ax_surface, shrink=0.5)

    # Right panel: the same surface as labelled contour lines.
    ax_contour = fig.add_subplot(122)
    contour_set = ax_contour.contour(Y_true, Y_pred, mse_loss, levels=20)
    ax_contour.clabel(contour_set, inline=True, fontsize=8)
    ax_contour.set_xlabel('真实值')
    ax_contour.set_ylabel('预测值')
    ax_contour.set_title('MSE损失函数 (等高线)')
    ax_contour.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()


if __name__ == '__main__':
    # Plot the 2D curves of the common loss functions.
    visualize_loss_functions()

    # Render the MSE loss surface in 3D.
    visualize_loss_3d()
