import numpy as np
from sklearn.datasets import make_moons
from sklearn.preprocessing import StandardScaler

# Build a two-class "moons" dataset and standardize the features to
# zero mean / unit variance (helps the tanh hidden layer train).
X, y = make_moons(n_samples=200, noise=0.2, random_state=42)
y = y.reshape(-1, 1)  # labels as an (N, 1) column vector
X = StandardScaler().fit_transform(X)

# Network architecture and training hyper-parameters.
input_size = 2      # 2-D points from make_moons
hidden_size = 4     # units in the single hidden layer
output_size = 1     # one sigmoid unit for binary classification
learning_rate = 0.01
epochs = 2000

# Small random weights, zero biases; fixed seed for reproducibility.
# NOTE: the two randn() calls must stay in this order so the seeded
# RNG stream (and therefore the whole run) is unchanged.
np.random.seed(42)
W1 = np.random.randn(input_size, hidden_size) * 0.1
b1 = np.zeros((1, hidden_size))
W2 = np.random.randn(hidden_size, output_size) * 0.1
b2 = np.zeros((1, output_size))


# Activation functions
def sigmoid(x):
    """Numerically stable logistic sigmoid, 1 / (1 + e^-x).

    The naive form ``1 / (1 + np.exp(-x))`` overflows ``np.exp`` for
    large negative inputs. Here exp() is only ever evaluated on a
    non-positive argument, so it can underflow to 0 but never overflow.

    Works elementwise on scalars and arrays alike.
    """
    z = np.exp(-np.abs(x))
    # x >= 0:  1 / (1 + e^-x)
    # x <  0:  e^x / (1 + e^x)   (algebraically identical, safe exponent)
    return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))


def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed in terms of its *output*.

    Expects ``x`` to already be a sigmoid activation (a value in
    (0, 1)), since sigmoid'(z) = s(z) * (1 - s(z)).

    NOTE(review): currently unused — the training loop folds the
    sigmoid derivative directly into the cross-entropy gradient.
    """
    return x * (1 - x)


# Training loop: full-batch gradient descent on binary cross-entropy.
for epoch in range(epochs):
    # Forward pass: tanh hidden layer, sigmoid output probability.
    hidden_input = np.dot(X, W1) + b1
    hidden_output = np.tanh(hidden_input)
    final_input = np.dot(hidden_output, W2) + b2
    y_pred = sigmoid(final_input)

    # Binary cross-entropy loss. Clip predictions away from exactly
    # 0/1 so log() never produces -inf/NaN for saturated outputs.
    eps = 1e-12
    y_safe = np.clip(y_pred, eps, 1.0 - eps)
    loss = -np.mean(y * np.log(y_safe) + (1 - y) * np.log(1 - y_safe))

    # Backward pass. (y_pred - y) / N is the combined gradient of
    # cross-entropy through the sigmoid output, so no separate
    # sigmoid derivative (and no clipping) is needed here.
    d_loss = (y_pred - y) / len(y)
    d_W2 = np.dot(hidden_output.T, d_loss)
    d_b2 = np.sum(d_loss, axis=0, keepdims=True)

    # tanh'(z) = 1 - tanh(z)^2, using the cached activation.
    d_hidden = np.dot(d_loss, W2.T) * (1 - hidden_output ** 2)
    d_W1 = np.dot(X.T, d_hidden)
    d_b1 = np.sum(d_hidden, axis=0, keepdims=True)

    # Vanilla gradient-descent parameter update.
    W2 -= learning_rate * d_W2
    b2 -= learning_rate * d_b2
    W1 -= learning_rate * d_W1
    b1 -= learning_rate * d_b1

    if epoch % 200 == 0:
        print(f"Epoch {epoch}, Loss: {loss:.4f}")


# Inference helper
def predict(X):
    """Classify samples with the trained network.

    Runs the same forward pass as the training loop using the
    module-level parameters W1, b1, W2, b2, then thresholds the
    sigmoid probability at 0.5, returning hard 0/1 integer labels
    of shape (N, 1).
    """
    hidden = np.tanh(np.dot(X, W1) + b1)
    probs = sigmoid(np.dot(hidden, W2) + b2)
    return (probs > 0.5).astype(int)


# Report accuracy on the training set (no held-out split in this demo).
predictions = predict(X)
accuracy = (predictions == y).mean()
print(f"Final Accuracy: {accuracy * 100:.2f}%")