clear;

% Hyper-parameters
learn_rate   = 0.01;  % gradient-descent step size
train_times  = 1000;  % number of training epochs
hidden_nodes = 12;    % neurons in the hidden layer

% Generate the data set: datagen returns samples and binary labels.
% NOTE(review): assumes label entries are in {0,1} so [label, 1-label]
% forms valid one-hot targets — confirm against datagen.
[data, label] = datagen(0.5);
group = [label, 1-label];

% Split: 90% of the samples for training, the rest for testing.
% (Sizes are derived from the data instead of being hard-coded.)
n_samples = size(data, 1);
n_train   = round(0.9 * n_samples);
index = randperm(n_samples, n_train);
train = data(index, :);
test  = data;
test(index, :) = [];
train_target = group(index, :);
test_target  = group;
test_target(index, :) = [];
n_test = size(test, 1);

total_loss = zeros(train_times, 1);
x_axis = 1:train_times;

% Weight initialisation: 2 inputs -> hidden layer -> 2 outputs.
W1 = rand(hidden_nodes, 2);
b1 = zeros(hidden_nodes, 1);
W2 = rand(2, hidden_nodes);
b2 = zeros(2, 1);

% Training: plain stochastic gradient descent, one sample at a time.
for j = 1:train_times
    sumSE = 0;  % accumulated squared error for this epoch
    for i = 1:n_train
        % Forward pass
        target = train_target(i, :)';
        X      = train(i, :)';
        layer1 = sigmoid(W1*X + b1);
        out    = sigmoid(W2*layer1 + b2);
        error  = target - out;

        % Backward pass (chain rule). delta_* hold dLoss/dPreactivation.
        % Element-wise products replace the original diag(v)*x form,
        % which built an unnecessary matrix for the same result.
        delta_out  = -(out .* (1-out)) .* error;
        delta_hide = (layer1 .* (1-layer1)) .* (W2' * delta_out);

        % Parameter update. BUG FIX: the original never updated the
        % biases b1/b2, so they stayed at zero — they are trained here.
        W2 = W2 - learn_rate * (delta_out  * layer1');
        b2 = b2 - learn_rate * delta_out;
        W1 = W1 - learn_rate * (delta_hide * X');
        b1 = b1 - learn_rate * delta_hide;

        sumSE = sumSE + sum(error .^ 2);
    end
    % Epoch RMSE. BUG FIX: the original computed sqrt(sum)/n, which is
    % not the root-mean-square error; the mean belongs under the root.
    rmse = sqrt(sumSE / n_train);
    total_loss(j) = rmse;
    fprintf("loss = %.4f\r\n", rmse);
end

% Evaluation on the held-out test set.
total_acc = 0;
for i = 1:n_test
    target = test_target(i, :)';
    X      = test(i, :)';
    layer1 = sigmoid(W1*X + b1);
    out    = sigmoid(W2*layer1 + b2);
    % Two-output max gives a single argmax index even on ties, unlike
    % the original find(out==max(out)) comparison.
    [~, predicted] = max(out);
    [~, truth]     = max(target);
    if predicted == truth
        total_acc = total_acc + 1;
    end
end

% Report accuracy and plot the training-loss curve.
total_acc = total_acc / n_test;
fprintf("acc : %.2f\r\n", total_acc);
plot(x_axis, total_loss);