function [positions, fps] = tracker(params)
%TRACKER  Kernelized-correlation-filter tracker with color/HOG features.
%   [positions, fps] = tracker(params)
%
%   Runs a correlation-filter tracker (KCF-style: Gaussian kernel in the
%   Fourier domain, per-frame linear model interpolation) over an image
%   sequence and records the tracked box in every frame.
%
%   params is a struct with at least the fields read below:
%     padding             - search region padding relative to target size
%     output_sigma_factor - bandwidth of the desired Gaussian label map
%     sigma               - Gaussian correlation kernel bandwidth
%     lambda              - ridge-regression regularization term
%     learning_rate       - model interpolation factor per frame
%     features, hog_params, scale_rot_features - feature configuration
%                           (interpreted by the project helper get_features;
%                           scale_rot_features is only referenced by the
%                           commented-out scale/rotation branch below)
%     video_path, img_files - image sequence location and filenames
%     init_pos            - initial target center [row, col]
%     wsize               - initial target size [height, width]
%     visualization       - 1 to show a live tracking figure
%
%   Returns:
%     positions - num_frames x 4 matrix, one row per frame: [pos target_sz]
%                 (center row/col followed by target height/width)
%     fps       - frames per second of tracking work (timed with tic/toc;
%                 image loading happens before the tic and is excluded)

% unpack tracking parameters
padding = params.padding;
output_sigma_factor = params.output_sigma_factor;
sigma = params.sigma;
lambda = params.lambda;
learning_rate = params.learning_rate;
features = params.features;
scale_rot_features = params.scale_rot_features; %#ok<NASGU> % used only by the disabled scale/rot branch

video_path = params.video_path;
img_files = params.img_files;
pos = floor(params.init_pos);       % target center [row, col], pixels
target_sz = floor(params.wsize);    % target size [height, width], pixels
base_target_sz = target_sz; %#ok<NASGU> % kept for scale-adaptation experiments

visualization = params.visualization;
hog_params = params.hog_params;

num_frames = numel(img_files);

% search window size, taking padding into account
window_sz = floor(target_sz * (1 + padding));
model_sz = window_sz;

% desired output (gaussian shaped), bandwidth proportional to target size.
% HOG features are cell-based, so the label map lives at cell resolution.
if features.hog || features.hogcr
    cell_size = hog_params.cell_size;
else
    cell_size = 1;
end
output_sigma = sqrt(prod(target_sz)) * output_sigma_factor ./ cell_size;
y = gaussian_shaped_labels(output_sigma, floor(window_sz / cell_size));
yf = fft2(y);

% Gaussian label map in log-polar space for the (currently disabled)
% scale/rotation estimation branch; see the commented code in the loop.
sz = floor([180 360]/4);
[rs, cs] = ndgrid((1:sz(1)) - floor(sz(1)/2), (1:sz(2)) - floor(sz(2)/2));
ysr = exp(-0.5/20^2*(rs.^2+cs.^2));
ysrf = fft2(ysr); %#ok<NASGU>

% store pre-computed cosine window (outer product of two Hann windows)
cos_window = hann(size(yf, 1)) * hann(size(yf, 2))';

% per-frame outputs: tracked boxes and confidence measures
positions = zeros(num_frames, 4);
psr_confs = zeros(num_frames, 1);   % peak-to-sidelobe ratio per frame
apce_confs = zeros(num_frames, 1);  % average peak-to-correlation energy per frame

% initialize the target histogram and context histogram
cr_params.target_hist = [];
cr_params.context_hist = {};
new_model = true;    % first update_histogram_model2 call builds the model from scratch
scale_factor = 1;    % fixed at 1: scale estimation is disabled
rot = 0; %#ok<NASGU> % fixed at 0: rotation estimation is disabled

% to calculate fps
time = 0;

for frame = 1:num_frames
    % load image
    im = imread([video_path img_files{frame}]);
    
    tic;
    
    if frame > 1
        % --- detection: correlate the learned model against the new frame ---
        patch = get_subwindow(im, pos, window_sz, model_sz);
        %patch = get_affine_subwindow(im, pos, scale_factor, rot, window_sz);
        z = get_features(patch, features, hog_params, cr_params, cos_window);
        zf = fft2(z);
        kzf = gaussian_correlation(zf, model_xf, sigma);
        response = real(ifft2(alphaf_num .* kzf ./ alphaf_den));
        apce = APCE(response);
        apce_confs(frame) = apce;
        psr_confs(frame) = PSR(response, 0.15);
        %response = real(ifft2(model_alphaf .* kzf));
        
        % target location is at the maximum response; the response map is
        % circular, so peaks past the half-size wrap to negative offsets
        [vert_delta, horiz_delta] = find(response == max(response(:)), 1);
        if vert_delta > size(zf, 1)/2
            vert_delta = vert_delta - size(zf, 1);
        end
        if horiz_delta > size(zf, 2)/2
            horiz_delta = horiz_delta - size(zf, 2);
        end
        % convert the feature-cell offset back to image pixels
        pos = pos + (cell_size * scale_factor * [vert_delta - 1, horiz_delta - 1]);
        
    end
    
    % --- training: refresh histograms and filter at the (new) position ---
    [cr_params.target_hist, cr_params.context_hist] = update_histogram_model2(im, pos, target_sz, 0.07, new_model, cr_params.target_hist);
    %tg_hist2 = get_target_hist(im, pos, target_sz);
    % extract the feature map of the local image patch to train the classifer
    patch = get_subwindow(im, pos, window_sz, model_sz);
    %patch = get_affine_subwindow(im, pos, scale_factor, rot, window_sz);
    x = get_features(patch, features, hog_params, cr_params, cos_window);
    xf = fft2(x);
    % calculate the new classifier coefficients (ridge regression in the
    % Fourier domain; numerator/denominator kept separate for averaging)
    kf = gaussian_correlation(xf, xf, sigma);
    new_alphaf_num = yf .* kf;
    new_alphaf_den = kf .* (kf + lambda);
    
    %patchL = LogPolarTransform(double(patch), 1, 1.02, 180, 360);
    %xsr = get_features(patchL, scale_rot_features, hog_params, {});
    %xsrf = fft(xsr);
    
    %new_sr_hnum = bsxfun(@times, ysrf, conj(xsrf));
    %new_sr_hden = sum(xsrf.*conj(xsrf), 3);
    %alphaf = yf ./ (kf + lambda);
    
    if frame == 1
        % first frame, train with a single image
        alphaf_num = new_alphaf_num;
        alphaf_den = new_alphaf_den;
        model_xf = xf;
        %model_alphaf = alphaf;
        new_model = false;
    else
        % subsequent frames, update the model by linear interpolation
        alphaf_num = (1 - learning_rate) * alphaf_num + learning_rate * new_alphaf_num;
        alphaf_den = (1 - learning_rate) * alphaf_den + learning_rate * new_alphaf_den;
        %model_alphaf = (1 - learning_rate) * model_alphaf + learning_rate * alphaf;
        model_xf = (1 - learning_rate ) * model_xf + learning_rate * xf;
    end
    
    %save position
    positions(frame,:) = [pos target_sz];
    
    time = time + toc;
    
    %visualization
    if visualization == 1
        % rectangle() wants [x y w h], so swap from [row col] order
        rect_position = [pos([2,1]) - target_sz([2,1])/2, target_sz([2,1])];
        if frame == 1  %first frame, create GUI
            figure('Name',['Tracker - ' video_path]);
            % magnify small images 2x (length(im) is the largest dimension)
            im_handle = imshow(uint8(im), 'Border','tight', 'InitialMag', 100 + 100 * (length(im) < 500));
            rect_handle = rectangle('Position',rect_position, 'EdgeColor','g');
            text_handle = text(10, 10, int2str(frame));
            set(text_handle, 'color', [0 1 1]);
            %apce_handle = text(0,60,0.2, '0');
            figure;conf_handle=plot(1:num_frames, apce_confs);title('APCE plot');
            figure;conf2_handle=plot(1:num_frames, psr_confs);title('PSR plot');
        else
            try  %subsequent frames, update GUI
                set(im_handle, 'CData', im)
                set(rect_handle, 'Position', rect_position)
                set(text_handle, 'string', int2str(frame));
                %set(apce_handle, 'string',['APCE=' num2str(apce)]);
                %set(apce_handle, 'Position', [0 60 0.8*max(response(:))]);
                set(conf_handle, 'YData', apce_confs);
                set(conf2_handle, 'YData', psr_confs);
            catch
                % figure was closed by the user: stop tracking gracefully
                return
            end
        end
        
        drawnow
    end
end

fps = num_frames/time;
end
