
% load data
clc;close all;rng('default');
% [val, text] = xlsread('data_lsLNH.xlsx',1); % load weekly data
load '../data/lsLNH.mat' %load data weekly

% ==========INIT PARAMS
% ===== create lagged X_t ====
P = 3; %lag delta_R
Q = 1; %lag gap
K = 0; %lag R
timeTrend = true; % X_t include time trend (alpha*t)
autoscale = true;  % scale X_t value into [-1 1]


% ===== create Yt_h ====
h = 1;          % forecast horizon
ratio = 1/5;    % divide data to training and test set

% ===== first param for model ====
kernel = 2; % rbf = 2

%%

% data processing
% Columns of `val` (inferred from variable names -- TODO confirm):
%   col 1 = 1-week rate, col 2 = 1-month rate, col 3 = 3-month rate.
% The r* series are first differences (period-over-period changes), so each
% is one observation shorter than its level series.
ls1W = val(:,1); rls1W = ls1W(2:end) - ls1W(1:end-1);
ls1M = val(:,2); rls1M = ls1M(2:end) - ls1M(1:end-1);
ls3M = val(:,3); rls3M = ls3M(2:end) - ls3M(1:end-1);
% Term spreads between adjacent maturities.
gap1M1W = ls1M - ls1W;
gap3M1M = ls3M - ls1M;

% Yt_h1 = ls1M(1+h:end) - ls1M(1:end-h);
% Yt_h = Yt_h1(2:end);

% first we consider the following predictors: EMA, delta_laisuat_1M, gap[1M]-gap[1W]
% EMA4_1M =funcEMA(ls1M,4);
% EMA12_1M = funcEMA(ls1M,12);
% Xt = [gap1M1W(2:end-h) rls1M(1:end-h) EMA4_1M(2:end-h) EMA12_1M(2:end-h)];
% Xt = [gap3M1M(2:end-h) rls1M(1:end-h) EMA4_1M(2:end-h)];

% ======create a list of lagged variables for regression========
% r_{t+h} = F ( r_t, r_{t-1} ... r_{t-K} )
% lagmatrix with lag -h produces a LEAD: column 1 of lagDeltaR is the
% h-step-ahead change (the target); columns 2..P+2 hold the current change
% and its P lags.
lagDeltaR = lagmatrix(rls1M,[-h 0:1:P]);
lagR = lagmatrix(ls1M,0:K);
lagGap = lagmatrix(gap3M1M,0:1:Q);
% rls1M is one row shorter than the level series, so lagGap/lagR drop their
% first row (2:end) to align all regressors on the same time index.
tempX = [lagDeltaR(:,1), lagDeltaR(:,2:end), lagGap(2:end,:), lagR(2:end,:)];
% Trim the NaN rows introduced by lagmatrix: the first max(P,Q,K) rows
% (from the positive lags) and the last h rows (from the lead in column 1).
YX = tempX(max([P,Q,K])+1:end-h,:);
datenew = dates(max([P,Q,K])+1:end-h);
% Ternary-encode the sign of the future change as class labels:
%   1 = down, 2 = unchanged, 3 = up.
Yt_h = (YX(:,1)>0) - (YX(:,1)<0) + 2;
Xt = YX(:,2:end);


[N, D] = size(Xt);

% ---- chronological train/test split: first Ntrain rows train, rest test ----
Ntest = ceil(N*ratio);
Ntrain = N - Ntest;

if(timeTrend)
    % Prepend a linear time-trend column. The test-set trend continues the
    % training clock (Ntrain+1 .. N) instead of restarting at 1, so the
    % trend is one consistent regressor over the whole sample and the
    % training-fitted scaling of this column stays meaningful out of sample.
    Xtrain_t = [(1:Ntrain)', Xt(1:Ntrain,:)];
    Xtest = [(Ntrain+1:N)', Xt(Ntrain+1:end,:)];
    D = D + 1;
else
    % No trend column. (Bug fix: the original added the trend column in
    % both branches, so timeTrend=false had no effect and D no longer
    % matched the actual number of predictor columns.)
    Xtrain_t = Xt(1:Ntrain,:);
    Xtest = Xt(Ntrain+1:end,:);
end
Ytrain_t = Yt_h(1:Ntrain);
Ytest = Yt_h(Ntrain+1:end);


%% simple scaling [a,b] -> [-1,1]

% Class labels are categorical and are never rescaled, in either branch.
YtrainScale_t = Ytrain_t;
Yscale_test = Ytest;

if ~autoscale
    % Pass the predictors through untouched.
    XtrainScale_t = Xtrain_t;
    Xscale_test = Xtest;
else
    % Fit the normalization on the training set only, then apply the SAME
    % training-set minimums/ranges to the test set (no test-set leakage).
    [XtrainScale_t, minimums, ranges] = featureNormalize(Xtrain_t);
    [Xscale_test, ~, ~] = featureNormalize(Xtest, minimums, ranges);
end

classY = unique(YtrainScale_t);
NClass = length(classY);
%%

rng(1);
% Hyper-parameter search: autogrid presumably cross-validates over a log2
% grid of (C, gamma) and returns the best exponents -- TODO confirm helper.
[bestLog2c,bestLog2g,bestcv] = autogrid(YtrainScale_t,XtrainScale_t);


% Train the final model with libsvm options:
%   -b 1 : enable probability estimates
%   -g   : RBF kernel width gamma = 2^bestLog2g
%   -c   : soft-margin cost C = 2^bestLog2c
cmd = sprintf('-b 1 -g %f -c %f',[2^bestLog2g,2^bestLog2c]);
bestModel = svmtrain2(YtrainScale_t,XtrainScale_t,cmd);

[predictedLabel, accuracy, decisValueWinner] = svmpredict(Yscale_test, Xscale_test, bestModel, '-b 1'); % run the SVM model on the test data

% #######################
% Make confusion matrix for the overall classification
% #######################
% confusionmat returns rows = true class, columns = predicted class; the
% transpose below puts predicted on the y-axis to match the axis labels.
[confusionMatrixAll,orderAll] = confusionmat(Yscale_test,predictedLabel);
figure; imagesc(confusionMatrixAll');
xlabel('actual class label');
ylabel('predicted class label');
title(['confusion matrix for overall classification']);
% Overall accuracy = correctly classified / total test observations.
accuracyAll = trace(confusionMatrixAll)/sum(confusionMatrixAll(:));
disp(['Total accuracy is ',num2str(accuracyAll*100),'%']);
%%

% Reconstruct the separating hyperplane (w, b) of every pairwise
% (one-vs-one) classifier from the libsvm model structure.
% NOTE(review): with an RBF kernel these weights live in kernel space;
% interpret them with care -- they are exact only for a linear kernel.
numSV = bestModel.nSV; % support-vector count per class (length = NClass)
numSV_end = cumsum(numSV);       % last row index of each class's SVs in bestModel.SVs
numSV_begin = numSV_end-numSV+1; % first row index of each class's SVs


cnt = 1;
W = zeros(NClass*(NClass-1)/2,D); % one weight row per class pair
B = zeros(NClass*(NClass-1)/2,1); % one bias per class pair
for c1 = 1:NClass
    for c2 = (c1+1):NClass
        % the weight of class c1 vs class c2
        % sv_coef has NClass-1 columns: for an SV of class i, its dual
        % coefficient against class j sits in column j-1 when j > i and
        % column j otherwise. Here c1 < c2 always, so c2-(c1<c2) = c2-1
        % and c1-(c2<c1) = c1.
        coef = [bestModel.sv_coef(numSV_begin(c1):numSV_end(c1),c2-(c1<c2));...
                bestModel.sv_coef(numSV_begin(c2):numSV_end(c2),c1-(c2<c1))];
        SVs = [bestModel.SVs(numSV_begin(c1):numSV_end(c1),:);...
               bestModel.SVs(numSV_begin(c2):numSV_end(c2),:)];
        w = SVs'*coef; % primal weights = dual-weighted sum of the pair's SVs
        
        % This is how to convert (c1,c2) to the order of (1,2) (1,3) (1,4)
        % (2,3) (2,4): mark the pair in a symmetric matrix and let
        % squareform linearize it, which reproduces libsvm's pair ordering.
        tmp = zeros(NClass,NClass);
        tmp(c1,c2) = 1;
        tmp(c2,c1) = 1;
        
        % Get the bias term (libsvm stores rho = -b)
        b = -bestModel.rho(squareform(tmp)==1);
        
        % Store the weight matrix W and the bias matrix B
        W(cnt,:) = w(:)';
        B(cnt,:) = b;

        cnt = cnt + 1;
        
    end
end
