% ==========================================================================
% Interest-rate direction forecasting with a one-vs-rest SVM (libsvm).
% Loads weekly rate data, builds lagged features, and sets hyperparameters
% for the cross-validation / training sections below.
% ==========================================================================
clc;
close all;
% [val, text] = xlsread('data_lsLNH.xlsx',1); % load weekly data
load '../data/lsLNH.mat' % loads `val` (and presumably `dates`) -- TODO confirm variables stored in the .mat

rng('default'); % fix RNG seed so CV fold splits are reproducible

% ==========INIT PARAMS
% ===== create lagged X_t ====
P = 8; % number of lags of the rate change (delta_R) used as features
Q = 4; % number of lags of the term-structure gap used as features
K = 1; % number of lags of the rate level R used as features
timeTrend = true; % if true, X_t includes a deterministic time trend (alpha*t)
autoscale = true;  % min-max scale X_t columns into [0,1] (note: original comment said [-1 1], but the scaling code below maps to [0,1])


% ===== create Yt_h ====
h = 1;          % forecast horizon (steps ahead)
ratio = 1/5;    % fraction of the sample held out as the test set

% ===== first param for model ====
kernel = 2; % libsvm kernel type: 2 = RBF

%%

% ===== data processing =====
% Columns of `val` are assumed to be the 1-week, 1-month and 3-month rate
% series, in that order -- TODO confirm against the source .mat file.
ls1W = val(:,1); rls1W = ls1W(2:end) - ls1W(1:end-1); % 1W level and its first difference
ls1M = val(:,2); rls1M = ls1M(2:end) - ls1M(1:end-1); % 1M level and its first difference
ls3M = val(:,3); rls3M = ls3M(2:end) - ls3M(1:end-1); % 3M level and its first difference
gap1M1W = ls1M - ls1W; % term-structure spread: 1M minus 1W
gap3M1M = ls3M - ls1M; % term-structure spread: 3M minus 1M

% Yt_h1 = ls1M(1+h:end) - ls1M(1:end-h);
% Yt_h = Yt_h1(2:end);

% first we consider the following predictors: EMA, delta_laisuat_1M, gap[1M]-gap[1W]
% EMA4_1M =funcEMA(ls1M,4);
% EMA12_1M = funcEMA(ls1M,12);
% Xt = [gap1M1W(2:end-h) rls1M(1:end-h) EMA4_1M(2:end-h) EMA12_1M(2:end-h)];
% Xt = [gap3M1M(2:end-h) rls1M(1:end-h) EMA4_1M(2:end-h)];

% ======create a list of lagged variables for regression========
% r_{t+h} = F ( r_t, r_{t-1} ... r_{t-K} )
% lagmatrix with a negative lag produces a LEAD: column 1 of lagDeltaR is the
% h-step-ahead change (the prediction target); the remaining P+1 columns are
% lags 0..P of the change.
lagDeltaR = lagmatrix(rls1M,[-h 0:1:P]);
lagR = lagmatrix(ls1M,0:K);        % level at lags 0..K
lagGap = lagmatrix(gap3M1M,0:1:Q); % 3M-1M spread at lags 0..Q
% rls1M is one observation shorter than the level series, so the level-based
% matrices drop their first row (2:end) to stay aligned with the differenced
% series -- NOTE(review): verify this alignment against `dates`.
tempX = [lagDeltaR(:,1), lagDeltaR(:,2:end), lagGap(2:end,:), lagR(2:end,:)];
% Trim the leading rows made NaN by lagging and the trailing h rows made NaN
% by the lead, keeping only fully-observed rows.
YX = tempX(max([P,Q,K])+1:end-h,:);
datenew = dates(max([P,Q,K])+1:end-h);
% Direction label from the sign of the future change, mapped to {1,2,3} =
% {decrease, no change, increase}.
Yt_h = (YX(:,1)>0) - (YX(:,1)<0) + 2;
Xt = YX(:,2:end);


% ===== chronological train/test split: first Ntrain rows train, rest test =====
[N, D] = size(Xt);

Ntest = ceil(N*ratio);  % last `ratio` fraction of the sample is held out
Ntrain = N - Ntest;

% Optionally prepend the deterministic time-trend column (alpha*t).
% BUGFIX: both branches of the original if/else prepended the trend column,
% so the timeTrend flag had no effect; the else branch now omits it.
% BUGFIX: the test-set trend previously restarted at 1; it now continues
% from Ntrain+1 so the trend variable is consistent across the split.
if(timeTrend)
    Xtrain_t = [(1:Ntrain)', Xt(1:Ntrain,:)];
    Xtest = [((Ntrain+1):N)', Xt(Ntrain+1:end,:)];
    D = D + 1; % feature count grows by the trend column
else
    Xtrain_t = Xt(1:Ntrain,:);
    Xtest = Xt(Ntrain+1:end,:);
end
Ytrain_t = Yt_h(1:Ntrain);
Ytest = Yt_h(Ntrain+1:end);


%% scaling data
% Min-max scale each feature column into [0,1] using TRAINING-set statistics
% only, so no information from the test period leaks into the transform.
minimums = min(Xtrain_t, [], 1);
ranges = max(Xtrain_t, [], 1) - minimums;
% BUGFIX: a constant column has range 0 and would yield NaN (0/0) after
% scaling; divide such columns by 1 instead (they scale to all zeros).
ranges(ranges == 0) = 1;

% minY = min(Ytrain_t, [], 1);
% rangesY = max(Ytrain_t, [], 1) - minY;


if(autoscale)
    XtrainScale_t = (Xtrain_t - repmat(minimums, size(Xtrain_t, 1), 1)) ./ repmat(ranges, size(Xtrain_t, 1), 1);
    % Test features are scaled with the TRAINING minimums/ranges; values may
    % fall outside [0,1], which is expected.
    Xscale_test = (Xtest - repmat(minimums, size(Xtest, 1), 1)) ./ repmat(ranges, size(Xtest, 1), 1);
    % Class labels {1,2,3} are categorical and are not scaled.
    YtrainScale_t = Ytrain_t;
    Yscale_test = Ytest;
else
    XtrainScale_t = Xtrain_t;
    YtrainScale_t = Ytrain_t;
    Xscale_test = Xtest;
    Yscale_test = Ytest;
end

%% #######################
% Automatic Cross Validation 
% Parameter selection using n-fold cross validation
% #######################
% Coarse-to-fine grid search over (log2 C, log2 gamma): each pass searches a
% 5x5 grid centered on the current best point, then halves the step size.
% The loop stops when the CV-accuracy improvement falls below `epsilon` or
% the evaluation budget `Nlimit` is exhausted.

stepSize = 5;               % initial half-width of the log2 search grid
bestLog2c = 0;              % starting center: C = 2^0 = 1
bestLog2g = log2(1/D);      % starting center: gamma = 1/D (libsvm's common default)
epsilon = 0.005;            % convergence tolerance on CV accuracy improvement
bestcv = 0;                 % best CV accuracy found so far (fraction)
Ncv = 5; % Ncv-fold cross validation cross validation
deltacv = 10^6;             % accuracy improvement of the last pass (init large)
Nlimit = 1000;              % hard cap on grid-point evaluations
cnt = 1;                    % number of grid points evaluated
breakLoop = 0;              % flag to unwind the nested loops when over budget


while abs(deltacv) > epsilon && cnt < Nlimit
    bestcv_prev = bestcv;
    prevStepSize = stepSize;
    stepSize = prevStepSize/2;  % refine: halve the grid spacing each pass
    % Grid spans +/- prevStepSize around the current best point.
    log2c_list = bestLog2c-prevStepSize: stepSize: bestLog2c+prevStepSize;
    log2g_list = bestLog2g-prevStepSize: stepSize: bestLog2g+prevStepSize;
    
    numLog2c = length(log2c_list);
    numLog2g = length(log2g_list);
    cvMatrix = zeros(numLog2c,numLog2g);
    
    for i = 1:numLog2c
        log2c = log2c_list(i);
        for j = 1:numLog2g
            log2g = log2g_list(j);
%             % With some kernel
%             cmd = ['-q -c ', num2str(2^log2c), ' -g ', num2str(2^log2g),' -t ', num2str(kernel)];
%             cv = get_cv_ac(YtrainScale_t, [(1:Ntrain)' XtrainScale_t*XtrainScale_t'], cmd, Ncv);
            % No -t flag is passed here, so libsvm's default kernel is used
            % -- presumably RBF, matching kernel=2; confirm against the
            % libsvm build in use.
            cmd = ['-q -c ', num2str(2^log2c), ' -g ', num2str(2^log2g)];
            cv = get_cv_ac(YtrainScale_t, XtrainScale_t, cmd, Ncv);
            % >= (not >) so ties move the center to the latest grid point.
            if (cv >= bestcv),
                bestcv = cv; bestLog2c = log2c; bestLog2g = log2g;
                bestc = 2^bestLog2c; bestg = 2^bestLog2g;
            end
            disp(['So far, cnt=',num2str(cnt),' the best parameters, yielding Accuracy=',num2str(bestcv*100),'%, are: C=',num2str(bestc),', gamma=',num2str(bestg)]);
            % Break out of the loop when the cnt is up to the condition
            if cnt > Nlimit, breakLoop = 1; break; end
            cnt = cnt + 1;
        end
        if breakLoop == 1, break; end
    end
    if breakLoop == 1, break; end
    % Improvement over the previous pass; drives the convergence test.
    deltacv = bestcv - bestcv_prev;
    
end
disp(['The best parameters, yielding Accuracy=',num2str(bestcv*100),'%, are: C=',num2str(bestc),', gamma=',num2str(bestg)]);


%%
% #######################
% Train the SVM in one-vs-rest (OVR) mode
% #######################
% Build the libsvm option string from the CV-selected C and gamma, with the
% configured kernel type (-t 2 = RBF).
% BUGFIX: the option string contained a stray comma ("-c X, -g Y"), which
% corrupts libsvm's option parsing; removed.
bestParam = ['-q -c ', num2str(bestc), ' -g ', num2str(bestg), ' -t ', num2str(kernel)];
% model = ovrtrainBot(YtrainScale_t, [(1:Ntrain)' XtrainScale_t*XtrainScale_t'], bestParam);
model = ovrtrainBot(YtrainScale_t, XtrainScale_t, bestParam);

% #######################
% Classify samples using OVR model
% #######################
% With precomputed kernel (unused):
% [predict_label, accuracy, decis_values] = ovrpredictBot(Yscale_test, [(1:Ntest)' Xscale_test*Xscale_test'], model);
% [decis_value_winner, label_out] = max(decis_values,[],2);
%
% ovrpredictBot returns one decision value per class; the final predicted
% label is the class whose one-vs-rest decision value is largest.
[predict_label, accuracy, decis_values] = ovrpredictBot(Yscale_test, Xscale_test, model);
[decis_value_winner, label_out] = max(decis_values,[],2);


% #######################
% Make confusion matrix
% #######################
% confusionmat convention: rows = ground-truth class, columns = predicted
% class; `order` lists the class labels indexing both dimensions.
[confusionMatrix,order] = confusionmat(Yscale_test,label_out);
confusionMatrixPerc = confusionMatrix./Ntest; % entries as fractions of the test set
% Note: For confusionMatrix
% column: predicted class label
% row: ground-truth class label
% But we need the conventional confusion matrix which has
% column: actual
% row: predicted
% Transposed so the plot shows rows = predicted, columns = actual.
figure; imagesc(confusionMatrixPerc');colorbar;
xlabel('actual class label');
ylabel('predicted class label');
% Overall accuracy = correctly classified (diagonal) / test-set size.
totalAccuracy = trace(confusionMatrix)/Ntest;
disp(['Total accuracy from the SVM: ',num2str(totalAccuracy*100),'%']);
