%***********************************************************
% CS 229 Machine Learning
% Project, Ground truth data generation
%-----------------------------------------------------------
% Date : November 26, 2010
%***********************************************************

% Reset the workspace and expose the third-party toolboxes:
% LIBSVM (SVM training/prediction) and KalmanAll (Kalman smoothing).
clear all;

addpath(genpath('./LIBSVM/'))
addpath(genpath('./KalmanAll/'))

%**************************************************************************
%Input parameters

% Camera dimensions
%outputwidth = 320;
%outputheight = 180;
%Wmax = 640;
%Hmax = 360;

% Scales conversions
% Factors mapping each source's native pixel coordinates into a common
% 640-wide frame (lecturer was tracked at 2560 px width, saliency and
% boards at 1920 px width).
scale_conversion_factor_tracking = 1.5;
lecturer_tracking_scale_factor = 640/2560;
saliency_tracking_scale_factor = 640/1920;
board_scale_factor = 640/1920;

% Total number of interpolated frames processed by this script.
num_points = 50000;

% Kalman smoothing parameters
% Window sizes (in frames) passed to smooth() for the ground-truth
% trajectory and the final output, respectively.
ground_truth_smooth_factor = 150;
final_output_smooth_factor = 250;

% Parameters for panning label prediction
frame = 500; % Sliding window size
numsteps = 5;
step = round(frame/numsteps); % Step size.
% Frame offsets at which derivatives are sampled as features
% NOTE(review): assumed; confirm against PrepareZoomFeatures.
derivative_range = 30:30:150;

% Parameters for trajectory regression
frame_offset = 10;
sample_step = frame_offset/10;

% SVM Parameters
% Exponents of the RBF-SVM hyper-parameters: C = 2^log2c, gamma = 2^log2g.
log2c = 0.95;
log2g = 1.80;

% Panning noise removal parameters
% Widths (in frames) below which troughs of -1's / spikes of +1's in the
% predicted zoom label are treated as noise and levelled off.
zoom_label_trough_width = 300;
zoom_label_spike_width = 700;

% Rectification look ahead parameter
rectify_future_max = 1000;

% Load observations and precomputed predictions. Each file defines a
% workspace variable named after the file (groundTruth, lecturer, ...,
% trajPrediction, panPrediction).
load groundTruth.dat;
load lecturer.dat;
load saliency.dat;
load boards.dat;
load zoom_ground_truth.mat;
load faceDetection.dat;
load trajPrediction_all.mat;
load panPrediction_all.mat;
%**************************************************************************
% Convert filenames to internal variables
% saliency = saliency22;
% boards = boards22;
% lecturer = lecturer22;
% faceDetection = faceDetection;

dataA = groundTruth;

% The required frame rate is 33 ms per frame, while the recorded
% observations arrive at multiples of 100 ms, so we resample the ground
% truth onto a uniform 100/3 ms grid by linear interpolation.

% Display the raw ground-truth dimensions as a quick sanity check.
size(dataA)

%--------------------------------------------------------------------------
% Interpolate data (column-vector formulation: interp1's output follows the
% shape of the query points, so no transposes are needed).
time_grid = (dataA(1,1):100/3:dataA(end,1))';
newDataA = zeros(numel(time_grid), 3);
newDataA(:,1) = time_grid;
newDataA(:,2) = interp1(dataA(:,1), dataA(:,2), time_grid)/scale_conversion_factor_tracking + 10;
newDataA(:,3) = interp1(dataA(:,1), dataA(:,3), time_grid)/scale_conversion_factor_tracking + 10;

%--------------------------------------------------------------------------
%Adjustment for camera border case
%newDataA(:,2) = min(max(floor(newDataA(:,2) - outputwidth/2),1),Wmax-outputwidth) + outputwidth/2;
%newDataA(:,3) = min(max(floor(newDataA(:,3) - outputheight/2),1),Hmax-outputheight) +outputheight/2 ;
%--------------------------------------------------------------------------

%==========================================================================================
% Prepare ground truth: y is the smoothed x-trajectory, l the zoom labels.
y = smooth(newDataA(:,2), ground_truth_smooth_factor);
l = zoom_ground_truth;
% y,l are the ground truth

% Board center in the x-direction, rescaled into the common 640-wide frame.
board_center_x = board_scale_factor * (boards(:,1) + boards(:,2)) / 2;

% Prepare features first

% Scale vectors
% Lecturer-tracking feature adjustment: same common coordinate frame.
lecturer = lecturer_tracking_scale_factor * lecturer;
%lecturer(:,2) = min(max(floor(lecturer(:,2) - outputwidth/2),1),Wmax-outputwidth) + outputwidth/2 ;
%lecturer(:,3) = min(max(floor(lecturer(:,3) - outputheight/2),1),Hmax-outputheight) +outputheight/2  ;

% Saliency feature adjustment.
saliency = saliency_tracking_scale_factor * saliency;

% Pre-process face data
% The data is quite noisy. We correct this in two steps in the same way as
% for the predicted panning label.
% 1. Go over the data once and change all groups of -1's which are thinner
% than 100 frames in width to 1.
% 2. Now, go over the data again, and remove all groups of 1's that are
% less than 500 frames in width.
% 
% last_one = 0;
% for i = 1:size(faceDetection, 1)
%     if (faceDetection(i,1) == 1)
%         if (last_one > 0 && (i - last_one) <= 100)
%             faceDetection(last_one:i,1) = 1;
%         end
%         last_one = i;
%     end
% end
% 
% last_minus_one = 0;
% for i = 1:size(faceDetection, 1)
%     if (faceDetection(i,1) == 0)
%         if (last_minus_one > 0 && (i - last_minus_one) <= 500)
%             faceDetection(last_minus_one:i,1) = 0;
%         end
%         last_minus_one = i;
%     end
% end

% for i = 1:size(faceDetection, 1)
%     if (faceDetection(i,1) == 0)
%        faceDetection(i,1) = -1;
%     end
% end


% Prepare all features
% Build the SVM feature matrix for zoom classification; transposed so that
% each row corresponds to one frame.
X_zoom = PrepareZoomFeatures(1, num_points, frame, step, derivative_range, lecturer, saliency, boards, board_scale_factor, trajPrediction, panPrediction);
X_zoom = X_zoom';
% X_reg = PrepareFeatures(1, num_points, frame_offset, sample_step, lecturer, saliency, board_center_x);
% X_reg = X_reg';

% MSE values:
% MSE_before_panning_filter = 0;
% MSE_after_panning_filter = 0;
% MSE_final = 0;
% sal_MSE = 0;
% tracking_MSE = 0;

% Running sums of per-fold metrics; averaged after cross-validation.
fmeasure_total = 0;
prec_total = 0;
recall_total = 0;

% Cross validation ranges: row i of train_range/test_range holds the frame
% indices used for training/testing in fold i (5 folds over 50000 frames).
train_range = [1:40000; 1:30000 40001:50000; 1:20000 30001:50000; 1:10000 20001:50000; 10001:50000];
test_range = [40001:50000; 30001:40000; 20001:30000; 10001:20000; 1:10000];

% Code testing ranges
%train_range = [1:10000 20001:50000];
%test_range = [10001:20000];



% Cross-validation loop: for each fold, train an RBF SVM on the training
% frames, predict a zoom label (+1/-1) on the held-out frames, de-noise and
% rectify the prediction, then save the per-fold result for reassembly.
for seq_range=1:size(train_range,1)
    
    training_seq = train_range(seq_range,:);
    testing_seq = test_range(seq_range,:);
    
    % Step 1: Train the SVM to assign a panning label
    
    tic
    % RBF kernel (-t 2) with C = 2^log2c and gamma = 2^log2g.
    cmd = ['-t 2 -c ', num2str(2^log2c), ' -g ', num2str(2^log2g)];
    model =  svmtrain(l(training_seq), X_zoom(training_seq,:) ,cmd);
    
    [predicted_zoom_label, accuracy, decision_values] = svmpredict(l(testing_seq), X_zoom(testing_seq,:), model);
    
    % Plot the raw (pre-post-processing) prediction against ground truth.
    t = testing_seq;
    figure(10*seq_range + 1)
    plot(t,70*l(t),t,50*predicted_zoom_label, t,y(t), t, trajPrediction(t));
    legend('Ground truth zoom','Zoom Pred. before post-proc','Actual Trajectory', 'Our Prediction')
    %title(['Precision:' num2str(precision) ' recall: ' num2str(recall) ' accuracy: ' num2str(accuracy) ]);
    
    % The predicted zoom labels are quite noisy in places where we expect a
    % '1'. Level these oscillations off to '1'. We do this in two steps:
    % 1. Go over the data once and change all groups of -1's which are thinner
    % than zoom_label_trough_width frames in width to 1.
    % 2. Now, go over the data again, and remove all groups of 1's that are
    % less than zoom_label_spike_width frames in width.
    
    % Pass 1: fill narrow troughs of -1's between two +1's.
    last_one = 0;
    for i = 1:size(predicted_zoom_label, 1)
        if (predicted_zoom_label(i,1) == 1)
            if (last_one > 0 && (i - last_one) <= zoom_label_trough_width)
                predicted_zoom_label(last_one:i,1) = 1;
            end
            last_one = i;
        end
    end
    
    % Pass 2: remove narrow spikes of +1's between two -1's.
    last_minus_one = 0;
    for i = 1:size(predicted_zoom_label, 1)
        if (predicted_zoom_label(i,1) == -1)
            if (last_minus_one > 0 && (i - last_minus_one) <= zoom_label_spike_width)
                predicted_zoom_label(last_minus_one:i,1) = -1;
            end
            last_minus_one = i;
        end
    end
    
    % Rectify according to predicted panning label: force the zoom label to
    % -1 on frames where panning is predicted.
    panPred_short = panPrediction(testing_seq);
    for i = 1:size(predicted_zoom_label, 1)
        if (panPred_short(i) == 1)
            predicted_zoom_label(i) = -1;
        end
    end
    
    % Now remove spikes again - these were introduced by rectification.
    last_minus_one = 0;
    for i = 1:size(predicted_zoom_label, 1)
        if (predicted_zoom_label(i,1) == -1)
            if (last_minus_one > 0 && (i - last_minus_one) <= zoom_label_spike_width)
                predicted_zoom_label(last_minus_one:i,1) = -1;
            end
            last_minus_one = i;
        end
    end
    
    % No semicolon: echo the fold's metrics to the console.
    [precision, recall, accuracy] = computePrecisionRecall(predicted_zoom_label, l(testing_seq))
    
    toc
   
    % Persist this fold's prediction; the file index equals the fold index.
    % (Replaces the previous hard-coded 5-way if/elseif chain and works for
    % any number of folds.)
    save(sprintf('zoomPrediction%d.mat', seq_range), 'predicted_zoom_label');
    
    % BUG FIX: the F-measure is the harmonic mean of precision and RECALL;
    % the original code mistakenly used accuracy in place of recall.
    fmeasure = 2 * precision * recall / (precision + recall);
    fmeasure_total = fmeasure_total + fmeasure;
    prec_total = prec_total + precision;
    recall_total = recall_total + recall;
    
    t=testing_seq;
    
    figure(10*seq_range + 2)
    plot(t,70*l(t),t,50*predicted_zoom_label, t,y(t), t, trajPrediction(t), t, panPrediction(t));
    legend('Ground truth zoom','Zoom Prediction','Actual Trajectory', 'Our prediction', 'Our pan pred')
    title(['Precision:' num2str(precision) ' recall: ' num2str(recall) ' accuracy: ' num2str(accuracy) ]);
end

% Average the per-fold metrics (no semicolons: values are echoed to the
% console). Uses the actual fold count instead of a hard-coded 5.
num_folds = size(train_range, 1);
prec_total = prec_total/num_folds
recall_total = recall_total/num_folds
fmeasure_total = fmeasure_total/num_folds

% Stitch the per-fold predictions back into one full-length vector.
% Fold i was evaluated on test_range(i,:), so the labels saved in
% zoomPrediction<i>.mat fill exactly that span. (Replaces the manual
% load/assign chain, whose mapping was easy to get wrong by hand.)
zoomPrediction = zeros(num_points, 1);
for i = 1:num_folds
    fold = load(sprintf('zoomPrediction%d.mat', i));
    zoomPrediction(test_range(i,:)) = fold.predicted_zoom_label;
end

save('zoomPrediction_all.mat','zoomPrediction');

%calculate all MSEs
% sal_MSE = sal_MSE./size(train_range,1)
% tracking_MSE = tracking_MSE./size(train_range,1)
% MSE_before_panning_filter = MSE_before_panning_filter./size(train_range,1)
% MSE_after_panning_filter = MSE_after_panning_filter./size(train_range,1)
% MSE_final = MSE_final./size(train_range,1)