% 1. Initialize parameters for a 1-input, 1-hidden-unit, 1-output network
x = 2;                         % input sample
y = 0.9;                       % ground-truth label
W1 = 0.5;                      % hidden-layer weight
b1 = 0.3;                      % hidden-layer bias
W2 = 0.8;                      % output-layer weight
b2 = 0.1;                      % output-layer bias
alpha = 0.1;                   % learning rate
maxIter = 1000;                % maximum number of iterations
% Preallocate the loss history: growing an array inside the training loop
% forces a reallocation every iteration; zeros(1, maxIter) avoids that.
lossHistory = zeros(1, maxIter);

% 2. Sigmoid activation and its derivative, as anonymous functions.
%    sigmoid'(t) = sigmoid(t) * (1 - sigmoid(t)); the derivative closure
%    captures `sigmoid` at definition time.
sigmoid = @(t) 1 ./ (1 + exp(-t));
sigmoid_deriv = @(t) sigmoid(t) .* (1 - sigmoid(t));

% 3. Gradient-descent training loop
for iter = 1:maxIter
    % --- Forward pass: input -> hidden -> output ---
    z1 = W1 * x + b1;
    a1 = sigmoid(z1);
    z2 = W2 * a1 + b2;
    a2 = sigmoid(z2);

    % Squared-error loss, recorded for the learning curve
    loss = 0.5 * (y - a2)^2;
    lossHistory(iter) = loss;

    % --- Backward pass: chain rule expressed via layer deltas ---
    % delta2 = dL/dz2, delta1 = dL/dz1 (all gradients use pre-update weights)
    delta2 = -(y - a2) * sigmoid_deriv(z2);
    delta1 = delta2 * W2 * sigmoid_deriv(z1);
    grad_W2 = delta2 * a1;
    grad_b2 = delta2;
    grad_W1 = delta1 * x;
    grad_b1 = delta1;

    % --- Gradient-descent parameter update ---
    W2 = W2 - alpha * grad_W2;
    b2 = b2 - alpha * grad_b2;
    W1 = W1 - alpha * grad_W1;
    b1 = b1 - alpha * grad_b1;
end

% 4. Plot the loss curve over training iterations
figure;
plot(1:maxIter, lossHistory, 'LineWidth', 1.5);
grid on;
xlabel('Iteration');
ylabel('Loss');
title('Loss Curve of Backpropagation Training');

% 5. Report the final prediction, loss, and trained parameters
fprintf('Final Output: %.4f\n', a2);
fprintf('Final Loss: %.4f\n', loss);
fprintf('Updated W1: %.4f, b1: %.4f\n', W1, b1);
fprintf('Updated W2: %.4f, b2: %.4f\n', W2, b2);