%***********************************************************
% CS 229 Machine Learning
% Project, Ground truth data generation
%-----------------------------------------------------------
% Date : November 13, 2010
%***********************************************************

% Reset the workspace. "clearvars" is preferred over "clear all": clear all
% also removes breakpoints and compiled functions and slows down re-runs,
% with no benefit for a plain script (see MATLAB "clear" documentation).
clearvars;

addpath(genpath('./KalmanAll/'))

%**************************************************************************
% Input parameters
outputwidth = 320;    % width  (px) of the cropped output window
outputheight = 180;   % height (px) of the cropped output window
Wmax = 640;           % source frame width  (px) -- the clamping below assumes this
Hmax = 360;           % source frame height (px)
scale_conversion_factor_tracking = 1.5;  % ground-truth -> working-resolution divisor

% Tracker outputs were produced at other resolutions (presumably 2560- and
% 1920-wide -- TODO confirm); rescale both to the 640-wide working frame.
lecturer_tracking_scale_factor = 640/2560;
saliency_tracking_scale_factor = 640/1920;

% ASCII data files; "load x.dat" creates a workspace variable named x.
load groundTruth.dat;
load lecturer.dat;
load saliency.dat;
load boards.dat;
%**************************************************************************


dataA = groundTruth;

% The required frame period is 100/3 ms (~33 ms), but the observations were
% taken at multiples of 100 ms, so resample the ground truth onto the
% finer time grid by linear interpolation.

size(dataA)   % echo the dimensions (no semicolon on purpose)

%--------------------------------------------------------------------------
% Interpolate data onto the 100/3 ms grid
tGrid = dataA(1,1):100/3:dataA(end,1);
newDataA = zeros(numel(tGrid),3);
newDataA(:,1) = tGrid';
for col = 2:3
    resampled = interp1(dataA(:,1)', dataA(:,col)', tGrid);
    newDataA(:,col) = resampled' ./ scale_conversion_factor_tracking;
end

%--------------------------------------------------------------------------
% Border adjustment: clamp the crop centre so the output window stays
% fully inside the camera frame.
halfW = outputwidth/2;
halfH = outputheight/2;
newDataA(:,2) = min(max(floor(newDataA(:,2) - halfW), 1), Wmax - outputwidth) + halfW;
newDataA(:,3) = min(max(floor(newDataA(:,3) - halfH), 1), Hmax - outputheight) + halfH;
%--------------------------------------------------------------------------

%============================= Ignore this bit of code for now  ===========================
% TODO: May want to smoothen inputs before training

% Recover the control input (acceleration) that would reproduce the
% observed x-coordinate under a constant-acceleration state-space model:
%   state = [position; velocity],   observation = C * state

T = size(newDataA,1);    % total number of frames
deltaT = 100/3;          % frame period in ms (~33 ms)
A = [1 deltaT; 0 1];             % state transition matrix
B = [deltaT * deltaT/2; deltaT]; % control (acceleration) input matrix
C = [1 0];                       % we observe position only

% Acceleration estimates in px/ms^2, indices 1..T+1 (a_c(0) to a_c(T)).
% Entries 1 and T+1 are never written inside the loop and remain zero.
u_c = zeros(1,T+1);

state = A * [0;0];  % predicted state before the first update (zero init)

for t = 2:T
    % Solve the scalar equation  x_obs(t) = C*(state + B*u)  for u
    u_c(t) = (newDataA(t,2) - (C * state)) / (C * B);
    state = A * (state + B * u_c(t));
end

u_cs = smooth(u_c');  % moving-average smoothing of the raw accelerations

t = 1:size(u_c,2);
plot(t, u_cs);
legend('Smooth');
print('graph2.png', '-dpng')

% TODO: Smoothing may happen here instead of above - Judging by the values I got here

%==========================================================================================

% For now, train and test using the ground-truth x co-ordinates as the
% regression target instead of the recovered accelerations u_c.
y = newDataA(:,2);
y = smooth(y,150);   % heavy moving-average smoothing of the target
% Prepare features first


% Echo dimensions for a sanity check (no semicolons on purpose)
size(lecturer)
size(saliency)
size(y)

% Scale vectors
% Lecturer feature adjustment.
% NOTE(review): this multiply rescales EVERY column of the matrix,
% including column 1 (presumably a timestamp/frame index) -- confirm that
% column 1 is unused downstream or that scaling it is intended.
lecturer = lecturer * lecturer_tracking_scale_factor; 
lecturer(:,2) = min(max(floor(lecturer(:,2) - outputwidth/2),1),Wmax-outputwidth) + outputwidth/2;
lecturer(:,3) = min(max(floor(lecturer(:,3) - outputheight/2),1),Hmax-outputheight) +outputheight/2 ;

% Saliency feature adjustment (same caveat about column 1 as above).
saliency = saliency *  saliency_tracking_scale_factor;
%saliency(:,2) = saliency(:,2) + outputwidth/2;
%saliency(:,3) = saliency(:,3) + outputheight/2;

% Sweep bookkeeping: best offset seen so far and per-offset error records.
minFrameOffset = 1;
% inf is the idiomatic sentinel; the original literal 1e999 overflows to
% inf anyway, so the value is identical.
minError = inf;
trainMSEArr = []; 
testMSEArr = []; 
frame_offset_arr = [];

% Sweep the feature history length (frame_offset) and keep the value with
% the lowest held-out error.
for frame_offset = 15700:50:16000

% Sub-sample the history: step is ~1% of the offset, at least 1.
sample_step = max([1 round(frame_offset*0.01)]);
numTrainPoints = 30000;
numTestPoints = 20000;

% Prepare training features for frames 1..numTrainPoints
% (rows = features, columns = frames, as consumed by glmfit below).
trainFeatures = PrepareFeatures(1, numTrainPoints, frame_offset,sample_step, lecturer, saliency  ) ;

%------------------------------------------------------------------------------------

% Train a linear model (Gaussian GLM == least squares) on the first
% numTrainPoints frames; glmfit returns the intercept coefficient first.
trainOutput = y(1:numTrainPoints);
theta = glmfit(trainFeatures', trainOutput,'normal');

%Kalman learning
%[F, H, Q, R, initX, initV] = kl( (theta' * [ones(1,numTrainPoints);trainFeatures]),100);

%--------------------------------------------------------------------------

% Prepare test features.
% NOTE(review): the test window starts at numTrainPoints while the targets
% below start at numTrainPoints+1 -- check PrepareFeatures' start-index
% convention for a possible one-frame overlap with the training data.
testFeatures = PrepareFeatures(numTrainPoints, numTestPoints, frame_offset,sample_step, lecturer, saliency  ) ;


%-------------------------------------------------------------------------------------
%Test/Prediction: intercept (row of ones) plus learned coefficients

   testOutput = theta' * [ones(1,numTestPoints);testFeatures];
%  F = [1 1 1/2; 0 1 1; 0 0 1];
%     H = [1 0 0];
% Q = 0.1*eye(3);
% R = 600000000000*eye(1);
% initX = [0 0 0]';
% initV = 500000000000*eye(3);
% 
% 
%  testOutput = ks(testOutput,F, H, Q, R, initX, initV); 
%  testOutput = H*testOutput;
 
 %testOutput = smooth(testOutput,500)';
 
%--------------------------------------------------------------------------

% Mean squared error (px^2) of each predictor on the held-out frames.
% (Originally computed as mean(sum(e.*e)); for a vector the inner sum is a
% scalar, so that expression is the SUM of squared errors, not the mean.
% Fixed to a true MSE. All three error vectors share the same length, so
% their relative ordering and the best-offset selection are unchanged --
% only the scale of the reported numbers differs.)
errors = y(numTrainPoints+1:numTrainPoints+numTestPoints) - testOutput';

tracking_errors = y(numTrainPoints+1:numTrainPoints+numTestPoints) - lecturer(numTrainPoints+1:numTrainPoints+numTestPoints,2);
sal_errors = y(numTrainPoints+1:numTrainPoints+numTestPoints) - saliency(numTrainPoints+1:numTrainPoints+numTestPoints,2);
MSE = mean(errors.^2);
salMSE = mean(sal_errors.^2);
trackingMSE = mean(tracking_errors.^2);


t=1:numTestPoints;

% Overlay the two raw feature trajectories, the prediction and the target.
figure(2)
plot(t, lecturer(numTrainPoints+1:numTrainPoints+numTestPoints,2), ...
    t, saliency(numTrainPoints+1:numTrainPoints+numTestPoints,2),...
    t,testOutput,t,y(numTrainPoints+1:numTrainPoints+numTestPoints));

title(['Errors: ' num2str(MSE) ' Sal Errors: ' num2str(salMSE) ' Tracking Errors: ' num2str(trackingMSE)]);
legend('Lecturer Traj','Saliency Traj','Estimated Traj','Actual');
print('graph4.png', '-dpng')

figure(1)
plot(t,testOutput,t,y(numTrainPoints+1:numTrainPoints+numTestPoints),'LineWidth',1.8,'MarkerSize',8);
grid on;
%title({['Predicted and actual camera trajectory for'  num2str(numTestPoints) ' frames' ];['MSE: ' num2str(MSE)] });
fignam = 'Prelim_learning_result';
% NOTE(review): figure(4) below writes the same file name, so this file is
% overwritten later in the same iteration (and on every sweep pass) --
% confirm that only the final figure is wanted on disk.
saveas(gcf,[fignam '.eps'], 'psc2')
system( ['epstopdf ' fignam '.eps']);

% Track the best (lowest test-error) offset. No semicolons on purpose so
% that new best values are echoed to the console.
if(MSE<minError)    
    minError = MSE
    minFrameOffset = frame_offset
end

%--------------------------------------------------------------------------

%Training Errror
%-------------------------------------------------------------------------------------

% In-sample prediction with the same learned coefficients.
   trainOutput = theta' * [ones(1,numTrainPoints);trainFeatures];
%  F = [1 1 1/2; 0 1 1; 0 0 1];
%     H = [1 0 0];
% Q = 0.1*eye(3);
% R = 600000000000*eye(1);
% initX = [0 0 0]';
% initV = 500000000000*eye(3);
%  testOutput = ks(testOutput,F, H, Q, R, initX, initV); 
%  testOutput = H*testOutput;
 
 %testOutput = smooth(testOutput,500)';
 
%--------------------------------------------------------------------------

% In-sample mean squared error (same SSE -> MSE fix as above).
train_errors = y(1:numTrainPoints) - trainOutput';
train_MSE = mean(train_errors.^2);
t=1:numTrainPoints;

figure(3)
plot(t, lecturer(1:numTrainPoints,2), ...
    t, saliency(1:numTrainPoints,2),...
    t,trainOutput,...
    t,y(1:numTrainPoints));

title(['Errors: ' num2str(train_MSE) ' Sal Errors: ' num2str(salMSE) ' Tracking Errors: ' num2str(trackingMSE)]);
legend('Lecturer Traj','Saliency Traj','Estimated Traj','Actual');

figure(4)
plot(t,trainOutput,t,y(1:numTrainPoints),'LineWidth',1.8,'MarkerSize',8);
grid on;
%title({['Predicted and actual camera trajectory for'  num2str(numTestPoints) ' frames' ];['MSE: ' num2str(MSE)] });
fignam = 'Prelim_learning_result';
saveas(gcf,[fignam '.eps'], 'psc2')
system( ['epstopdf ' fignam '.eps']);

% (A duplicated "if (MSE < minError)" block originally sat here; it could
% never fire because minError is already <= MSE after the update above, so
% the dead code has been removed.)

% Record this sweep point's errors for the summary plot after the loop.
trainMSEArr = [trainMSEArr;  train_MSE]; 
testMSEArr = [testMSEArr;  MSE]; 
frame_offset_arr = [frame_offset_arr; frame_offset;];

end
figure(5)
% Test vs. training error as a function of the swept frame offset; the
% two column vectors are plotted as the columns of one matrix, which
% produces the same two lines (and color order) as separate x,y pairs.
plot(frame_offset_arr, [testMSEArr trainMSEArr]);
legend('Testing error','Training error');