classdef SNN < handle
% Fully-connected feed-forward neural network trained by back-propagation:
% sigmoid activations, mean-squared-error loss, batch gradient descent,
% optional L2 regularization, early stopping on the loss value.
%
% Data format:
%     X, input matrix [sample_number x feature_number]
%     y, label vector [sample_number x 1] (any discrete label values)
% Example:
%     hidden_layer = 2;             % number of hidden layers
%     alpha = 10:-0.01:0.01;        % one learning rate per iteration
%     lambda = 0.1;                 % L2 regularization strength
%     tol = 1e-3;                   % early-stopping loss threshold
%     nn = SNN(hidden_layer, alpha, lambda, tol);
%     nn.fit(Xtrain, ytrain);
%     nn.predict(Xtest)             % class indices into unique(ytrain)
    
    properties
        % number of hidden layers; each hidden layer has as many units as
        % there are input features
        hidden_layer
        % cell array of weight matrices, one per layer
        W
        % cell array of layer activations (bias column prepended on
        % input/hidden layers)
        A
        % cell array of per-layer error terms (dJ/dz)
        Delta
        % one-hot encoded training targets [samples x classes]
        Y
        % cell array of loss gradients w.r.t. each weight matrix
        P
        % maximum number of gradient-descent iterations
        iter
        % early-stopping threshold on the loss
        tol
        % learning rate(s); alpha(k) is used at iteration k
        alpha
        % L2 regularization strength
        lambda
        % current loss (MSE plus regularization term)
        J
        % loss history, one entry per forward pass
        JHis
    end
    
    methods
        function obj = SNN(hidden_layer, alpha, lambda, tol)
            % Construct the network; trailing arguments are optional.
            %   hidden_layer : number of hidden layers, must be >= 1
            %   alpha        : learning-rate schedule (default 0.01 x 1000)
            %   lambda       : L2 strength (default 0, no regularization)
            %   tol          : early-stopping loss threshold (default 1e-4)
            if nargin < 4
                obj.tol = 1e-4;
            else
                obj.tol = tol;
            end
            if nargin < 3
                obj.lambda = 0;
            else
                obj.lambda = lambda;
            end
            if nargin < 2
                obj.alpha = 0.01 * ones(1000, 1);
                obj.iter = 1000;
            else
                obj.alpha = alpha;
                % one gradient step per schedule entry
                obj.iter = length(alpha);
            end
            % fit/fp/bp indexing assumes at least one hidden layer;
            % fail fast here instead of with an obscure error later
            if hidden_layer < 1
                error('SNN:invalidArgument', 'hidden_layer must be >= 1');
            end
            obj.hidden_layer = hidden_layer;
        end
        
        % Training
        function fit(obj, X, y)
            % Train on X [samples x features] with labels y [samples x 1].
            % Labels are one-hot encoded: one output unit per unique label.
            my = length(y);
            uy = unique(y);
            ny = length(uy);
            obj.Y = zeros(my, ny);
            for k = 1: ny
                obj.Y(:, k) = (y == uy(k));
            end
            
            % Allocate activations; initialize weights uniformly in [-0.5, 0.5)
            [m, n] = size(X);
            L = obj.hidden_layer;
            obj.A{1} = [ones(m, 1), X];
            for k = 1: L
                obj.W{k} = rand(n, n + 1) - 0.5;
                obj.A{k + 1} = ones(m, n + 1);
            end
            obj.W{L + 1} = rand(ny, n + 1) - 0.5;
            obj.A{L + 2} = zeros(my, ny);
            obj.P = obj.W;
            obj.Delta = obj.A;
            
            % Batch gradient descent with early stopping on the loss.
            % JHis is preallocated (not grown per iteration) and trimmed
            % to the number of entries actually filled.
            obj.fp();
            obj.JHis = zeros(1, obj.iter + 1);
            obj.JHis(1) = obj.J;
            last = 1;
            for k = 1: obj.iter
                if obj.J < obj.tol
                    break;
                end
                obj.bp();
                obj.gradDesc(obj.alpha(k));
                obj.fp();
                last = k + 1;
                obj.JHis(last) = obj.J;
            end
            obj.JHis = obj.JHis(1: last);
        end
        
        % Forward propagation
        function fp(obj)
            % Compute all layer activations and the regularized loss.
            m = size(obj.A{1}, 1);
            L = obj.hidden_layer;
            for k = 1: L
                % prepend the bias column to every hidden-layer activation
                obj.A{k + 1} = [ones(m, 1), SNN.sigmoid(obj.A{k} * (obj.W{k})')];
            end
            % output layer has no bias column
            obj.A{L + 2} = SNN.sigmoid(obj.A{L + 1} * (obj.W{L + 1})');
            d = obj.A{L + 2} - obj.Y;
            % mean squared error
            obj.J = 1 / (2 * m) * (d(:)' * d(:));
            % L2 penalty (NOTE: bias weights are regularized too, matching
            % the gradient added in bp)
            r = 0;
            for l = 1: length(obj.W)
                r = r + obj.W{l}(:)' * obj.W{l}(:);
            end
            obj.J = obj.J + r * obj.lambda / (2 * m);
        end
        
        % Back propagation
        function bp(obj)
            % Compute per-layer error terms, then gradients w.r.t. weights.
            m = size(obj.Y, 1);
            L = obj.hidden_layer;
            l = L + 2;   % index of the output layer in A/Delta
            % output layer: dJ/dz = (a - y) .* a .* (1 - a), averaged over m
            obj.Delta{l} = 1 / m * (obj.A{l} - obj.Y) .* (obj.A{l} .* (1 - obj.A{l}));
            % last hidden layer (the output Delta has no bias column to strip)
            obj.Delta{l - 1} = obj.Delta{l} * obj.W{l - 1} .* (obj.A{l - 1} .* (1 - obj.A{l - 1}));
            % earlier hidden layers: drop the bias column before propagating;
            % the loop is empty when hidden_layer == 1, so no guard is needed
            for k = l - 2: -1: 2
                obj.Delta{k} = obj.Delta{k + 1}(:, 2:end) * obj.W{k} .* obj.A{k} .* (1 - obj.A{k});
            end
            % gradients w.r.t. weights, including the L2 gradient lambda/m * W
            for k = 1: L
                obj.P{k} = (obj.Delta{k + 1}(:, 2:end))' * obj.A{k} ...
                    + obj.lambda / m * obj.W{k};
            end
            obj.P{L + 1} = (obj.Delta{L + 2})' * obj.A{L + 1} ...
                + obj.lambda / m * obj.W{L + 1};
        end
        
        % Gradient descent
        function gradDesc(obj, alpha)
            % One descent step: W{k} <- W{k} - alpha * P{k} for every layer.
            % (Updating each matrix in place is equivalent to the flatten/
            % reshape round-trip and avoids the extra copies.)
            for k = 1: obj.hidden_layer + 1
                obj.W{k} = obj.W{k} - alpha * obj.P{k};
            end
        end
    
        % Prediction
        function index = predict(obj, X)
            % Predict a class index (1..numClasses) for each row of X.
            % Indices refer to unique(ytrain), not the raw label values.
            for k = 1: obj.hidden_layer + 1
                X = [ones(size(X, 1), 1), X];
                X = SNN.sigmoid(X * (obj.W{k})');
            end
            [~, index] = max(X, [], 2);
        end
        
    end
    
    methods (Static, Access = private)
        function s = sigmoid(z)
            % Logistic sigmoid 1/(1+e^-z); identical to logsig but without
            % requiring the Deep Learning Toolbox.
            s = 1 ./ (1 + exp(-z));
        end
    end
    
end