function [W] = Ddavid_MPU_stochastic_gradient_descent_training_single(DataTraining, SingleSampledLabel, IterN, Alpha, C)
% Train a 1-by-M weight vector by stochastic gradient descent over the
% given samples, making IterN full passes (epochs) over the data.
%
% Inputs:
%   DataTraining       - N-by-M matrix, one training sample per row
%   SingleSampledLabel - N-by-1 column of labels, aligned with DataTraining rows
%   IterN              - number of epochs (full passes over all N samples)
%   Alpha              - learning-rate (step size) for each per-sample update
%   C                  - parameter forwarded to Ddavid_MPU_h_function
%                        (NOTE(review): semantics defined in that helper; not
%                        visible from this file)
%
% Output:
%   W                  - 1-by-M learned weight vector

N = size(DataTraining, 1);
M = size(DataTraining, 2);

% Shuffle samples and labels together (once, before training) so the
% per-sample update order is randomized but rows stay aligned with labels.
Data = [DataTraining SingleSampledLabel];
Data = Data(randperm(size(Data, 1)), :);
X = Data(:, 1:M);
Y = Data(:, (M + 1));

% Small uniform non-zero initialization of the weights.
W = 0.001 * ones(1, M);

for Iter = 1:IterN
    for i = 1:N
        xi = X(i, :);
        yi = Y(i, :);

        % Per-sample gradient step: move W against the prediction error
        % (Hypothesis - yi) scaled by the sample xi and the learning rate.
        Hypothesis = Ddavid_MPU_h_function(W, xi, yi, C);
        W = W - Alpha * (Hypothesis - yi) * xi;
    end
end
