import numpy as np
import matplotlib.pyplot as plt


# 生成螺旋数据集
def generate_spiral_data(num_samples=100, num_classes=3, noise=0.1):
    """Build a 2-D multi-class spiral dataset.

    Each class contributes `num_samples` points along its own spiral arm;
    `noise` adds Gaussian jitter to the angular coordinate.

    Returns:
        X: float array of shape (num_samples * num_classes, 2).
        y: integer label array of shape (num_samples * num_classes,).
    """
    total = num_samples * num_classes
    X = np.zeros((total, 2))
    y = np.zeros(total, dtype=int)

    # The radial coordinate is the same ramp for every arm.
    radius = np.linspace(0.0, 1, num_samples)

    for label in range(num_classes):
        arm = slice(label * num_samples, (label + 1) * num_samples)
        theta = (np.linspace(label * 4, (label + 1) * 4, num_samples)
                 + np.random.randn(num_samples) * noise)
        X[arm, 0] = radius * np.sin(theta)
        X[arm, 1] = radius * np.cos(theta)
        y[arm] = label

    return X, y


# 将标签转换为one-hot编码
def one_hot_encode(y, num_classes):
    """Convert integer labels into a (len(y), num_classes) one-hot matrix."""
    encoded = np.zeros((len(y), num_classes))
    encoded[np.arange(len(y)), y] = 1.0
    return encoded


# 定义神经网络类
class NeuralNetwork:
    """Two-layer fully-connected classifier with inverted dropout.

    Architecture: input -> affine -> ReLU -> (dropout) -> affine -> softmax,
    trained by full-batch gradient descent on the cross-entropy loss.
    """

    def __init__(self, input_size, hidden_size, output_size, dropout_rate=0.5):
        """Initialize small Gaussian weights and zero biases.

        Args:
            input_size: number of input features.
            hidden_size: number of hidden units.
            output_size: number of output classes.
            dropout_rate: probability of dropping each hidden unit during
                training. Must be < 1 (inverted dropout divides by 1 - rate).
        """
        self.params = {
            'W1': np.random.randn(input_size, hidden_size) * 0.01,
            'b1': np.zeros((1, hidden_size)),
            'W2': np.random.randn(hidden_size, output_size) * 0.01,
            'b2': np.zeros((1, output_size)),
        }
        self.dropout_rate = dropout_rate

    def relu(self, x):
        """Element-wise ReLU: max(0, x)."""
        return np.maximum(0, x)

    def softmax(self, x):
        """Row-wise softmax; subtracting the row max avoids exp overflow."""
        exp_x = np.exp(x - np.max(x, axis=1, keepdims=True))
        return exp_x / np.sum(exp_x, axis=1, keepdims=True)

    def forward(self, X, is_training=True):
        """Forward pass; caches intermediates for backward().

        Args:
            X: input batch, shape (m, input_size).
            is_training: apply inverted dropout to the hidden layer if True.

        Returns:
            Class probabilities, shape (m, output_size).
        """
        # First layer
        z1 = np.dot(X, self.params['W1']) + self.params['b1']
        a1 = self.relu(z1)

        if is_training:
            keep_prob = 1 - self.dropout_rate
            # Inverted dropout: zero units with probability dropout_rate and
            # scale survivors by 1/keep_prob so the expected activation
            # matches evaluation mode (no rescaling needed at predict time).
            self.mask = (np.random.rand(*a1.shape) < keep_prob) / keep_prob
            a1 = a1 * self.mask
        else:
            self.mask = None

        # Second layer
        z2 = np.dot(a1, self.params['W2']) + self.params['b2']
        a2 = self.softmax(z2)

        # NOTE: 'a1' stored here is the post-dropout activation, which is
        # exactly what the W2 gradient in backward() needs.
        self.cache = {'X': X, 'z1': z1, 'a1': a1, 'z2': z2, 'a2': a2}
        return a2

    def compute_loss(self, y_pred, y_true):
        """Mean cross-entropy between predicted probabilities and labels.

        Probabilities are clipped away from zero so log() cannot produce
        -inf/NaN when a predicted class probability underflows.
        """
        m = y_true.shape[0]
        picked = np.clip(y_pred[np.arange(m), y_true], 1e-12, None)
        return -np.sum(np.log(picked)) / m

    def backward(self, y_true):
        """Backpropagate through the most recent forward() call.

        Args:
            y_true: integer labels, shape (m,).

        Returns:
            Dict of gradients keyed like self.params ('W1', 'b1', 'W2', 'b2').
        """
        m = y_true.shape[0]

        # Retrieve the intermediates cached by forward().
        X, z1, a1, a2 = (self.cache[k] for k in ('X', 'z1', 'a1', 'a2'))

        # Combined softmax + cross-entropy gradient: (p - one_hot(y)) / m.
        # Copy first: the previous version subtracted in place, silently
        # mutating the probability array that forward() had returned.
        dz2 = a2.copy()
        dz2[np.arange(m), y_true] -= 1
        dz2 /= m

        grad = {
            'W2': np.dot(a1.T, dz2),
            'b2': np.sum(dz2, axis=0, keepdims=True),
        }

        # Hidden-layer gradient through the ReLU.
        da1 = np.dot(dz2, self.params['W2'].T)
        dz1 = da1 * (z1 > 0)

        # Gradients flow only through the units dropout kept; the mask
        # already carries the 1/keep_prob scaling from forward().
        if self.mask is not None:
            dz1 = dz1 * self.mask

        grad['W1'] = np.dot(X.T, dz1)
        grad['b1'] = np.sum(dz1, axis=0, keepdims=True)

        return grad

    def update_params(self, grad, learning_rate):
        """Vanilla gradient-descent step on every parameter."""
        for key in self.params:
            self.params[key] -= learning_rate * grad[key]

    def train(self, X, y, epochs=10000, learning_rate=0.1, verbose=False):
        """Full-batch gradient-descent training loop.

        Args:
            X: training inputs, shape (m, input_size).
            y: integer labels, shape (m,).
            epochs: number of passes over the batch.
            learning_rate: step size for update_params().
            verbose: print the loss every 1000 epochs if True.

        Returns:
            List of per-epoch loss values.
        """
        loss_history = []

        for epoch in range(epochs):
            # Forward pass in training mode (dropout active).
            y_pred = self.forward(X, is_training=True)

            loss = self.compute_loss(y_pred, y)
            loss_history.append(loss)

            # Backward pass and parameter update.
            grad = self.backward(y)
            self.update_params(grad, learning_rate)

            if verbose and epoch % 1000 == 0:
                print(f"Epoch {epoch}, Loss: {loss:.4f}")

        return loss_history

    def predict(self, X):
        """Return the most likely class per row (dropout disabled)."""
        probs = self.forward(X, is_training=False)
        return np.argmax(probs, axis=1)

    def accuracy(self, X, y):
        """Fraction of samples whose predicted class matches the label."""
        preds = self.predict(X)
        return np.mean(preds == y)


# 主程序
# Entry point: fit the dropout network on spiral data and visualise results.
if __name__ == "__main__":
    # Seed once: data generation, weight init and dropout masks all draw
    # from NumPy's global RNG, so this makes the whole run reproducible.
    np.random.seed(42)
    X, y = generate_spiral_data(num_samples=100, num_classes=3)
    num_classes = len(np.unique(y))

    # Network hyperparameters (dropout regularised).
    input_size = 2
    hidden_size = 100
    output_size = num_classes
    dropout_rate = 0.5

    model = NeuralNetwork(input_size, hidden_size, output_size, dropout_rate)
    loss_history = model.train(X, y, epochs=10000, learning_rate=0.1)

    # Report accuracy on the training set.
    train_acc = model.accuracy(X, y)
    print(f"Training Accuracy: {train_acc * 100:.2f}%")

    plt.figure(figsize=(12, 5))

    # Left panel: loss curve over training.
    plt.subplot(1, 2, 1)
    plt.plot(loss_history)
    plt.title("Training Loss")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")

    # Right panel: predicted class over a dense grid = decision regions.
    plt.subplot(1, 2, 2)
    margin = 0.1
    grid_x, grid_y = np.meshgrid(
        np.linspace(X[:, 0].min() - margin, X[:, 0].max() + margin, 100),
        np.linspace(X[:, 1].min() - margin, X[:, 1].max() + margin, 100),
    )
    flat_points = np.c_[grid_x.ravel(), grid_y.ravel()]
    regions = model.predict(flat_points).reshape(grid_x.shape)

    plt.contourf(grid_x, grid_y, regions, alpha=0.3)
    plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k')
    plt.title(f"Decision Boundaries (Acc: {train_acc * 100:.1f}%)")

    plt.tight_layout()
    plt.show()