% KCF tracker demo (unrolled equivalent of:
% [precision, fps] = run_tracker('Basketball', 'gaussian', 'hog'); )
clear; clc;

base_path = 'C:\Users\15-0135\Desktop\代码\matlab\tracker_release2\data\Benchmark';
video = choose_video(base_path);
[img_files, pos, target_sz, ground_truth, video_path] = load_video_info(base_path, video);

%-- tracker parameters --
padding = 1.5;              %extra search area surrounding the target
lambda = 1e-4;              %ridge-regression regularization
output_sigma_factor = 0.1;  %spatial bandwidth of the Gaussian label (proportional to target size)
show_visualization = 1;

%-- feature / kernel parameters (HOG features, Gaussian kernel) --
interp_factor = 0.02;       %linear interpolation factor for model adaptation
kernel.sigma = 0.5;         %Gaussian kernel bandwidth
kernel.poly_a = 1;          %polynomial kernel parameters (unused with the Gaussian kernel)
kernel.poly_b = 9;
features.gray = false;
features.hog = true;
features.hog_orientations = 9;
cell_size = 4;              %HOG cell size in pixels


%-- start running the tracker --
%if the target is large (diagonal >= 100 px), work at half resolution for speed
resize_image = (sqrt(prod(target_sz)) >= 100);
if resize_image,
    pos = floor(pos / 2);
    target_sz = floor(target_sz / 2);
end

%search window size: the target box scaled by (1 + padding); this is the
%region searched every frame (if the target moves farther than this between
%two frames, tracking may be lost)
window_sz = floor(target_sz * (1 + padding));
%note: a power-of-two size would give a faster FFT, but in practice the
%larger window makes the whole tracker slower:
%window_sz = 2 .^ nextpow2(window_sz);


%create regression labels, gaussian shaped, with a bandwidth proportional
%to the target size (expressed in feature-cell units)
output_sigma = sqrt(prod(target_sz)) * output_sigma_factor / cell_size;
%gaussian_shaped_labels builds the label map (peak shifted to the top-left
%corner via circshift); fft2 takes it to the Fourier domain as yf
yf = fft2(gaussian_shaped_labels(output_sigma, floor(window_sz / cell_size)));

%zero-centered grid coordinates of the label map; kept only for the debug
%mesh() plots of the response surface further below
sz = floor(window_sz / cell_size);
[rs, cs] = ndgrid((1:sz(1)) - floor(sz(1)/2), (1:sz(2)) - floor(sz(2)/2));

%cosine (Hann) window, to smooth the boundary discontinuities introduced
%by the implicit cyclic shifts
cos_window = hann(size(yf,1)) * hann(size(yf,2))';
%video display handle; update_visualization(frame, box) draws the current
%bounding box and returns true when the user closes the window
update_visualization = show_video(img_files, video_path, resize_image);

%note: variables ending with 'f' are in the Fourier domain.
%positions stores the target center for every frame; time accumulates only
%the tracking computation (image loading/display is excluded)
time = 0;                                 %to calculate FPS
positions = zeros(numel(img_files), 2);   %to calculate precision

%read the first frame; convert to grayscale and downscale when requested
im = imread([video_path img_files{1}]);
if size(im,3) > 1,
    im = rgb2gray(im);
end
if resize_image,
    im = imresize(im, 0.5);
end

tic()

%-- first frame: train the classifier at the given initial position --
%obtain a subwindow for training at the target position
patch = get_subwindow(im, pos, window_sz);
%HOG features of the patch, multiplied by cos_window, in the Fourier domain
xf = fft2(get_features(patch, features, cell_size, cos_window));
%kernel auto-correlation of the training sample with itself
kf = gaussian_correlation(xf, xf, kernel.sigma);
%equation for fast training: ridge regression solved in the Fourier domain
alphaf = yf ./ (kf + lambda);

%first frame: initialize the model directly (no interpolation yet)
model_alphaf = alphaf;
model_xf = xf;

frame = 1;
positions(frame,:) = pos;
time = time + toc();

%draw the initial bounding box; box is [x y w h] (column/row order swapped)
box = [pos([2,1]) - target_sz([2,1])/2, target_sz([2,1])];
stop = update_visualization(frame, box);
drawnow
%pause(0.05)  %uncomment to run slower

%-- remaining frames: detect at the previous position, then update model --
%(positions/time were preallocated for all frames, so loop over all of them)
for frame = 2:numel(img_files),
    im = imread([video_path img_files{frame}]);
    if size(im,3) > 1,
        im = rgb2gray(im);
    end
    if resize_image,
        im = imresize(im, 0.5);
    end

    tic()

    %detection: extract the search patch around the previous position
    %(the same patch location is later reused for this frame's training)
    patch = get_subwindow(im, pos, window_sz);
    zf = fft2(get_features(patch, features, cell_size, cos_window));
    %kernel cross-correlation between the test sample and the learned model
    kzf = gaussian_correlation(zf, model_xf, kernel.sigma);
    %equation for fast detection: back to the spatial domain, keep the real
    %part to obtain the response map
    response = real(ifft2(model_alphaf .* kzf));

    %locate the peak of the response; indices beyond the half-size wrap to
    %the negative half-space because the response is circular
    [vert_delta, horiz_delta] = find(response == max(response(:)), 1);
    if vert_delta > size(zf,1) / 2,  %wrap around to negative half-space of vertical axis
        vert_delta = vert_delta - size(zf,1);
    end
    if horiz_delta > size(zf,2) / 2,  %same for horizontal axis
        horiz_delta = horiz_delta - size(zf,2);
    end
    %the peak offset is in feature cells; convert it back to pixels
    pos = pos + cell_size * [vert_delta - 1, horiz_delta - 1];

    %training: extract a new patch at the updated position
    patch = get_subwindow(im, pos, window_sz);
    xf = fft2(get_features(patch, features, cell_size, cos_window));
    kf = gaussian_correlation(xf, xf, kernel.sigma);
    alphaf = yf ./ (kf + lambda);   %equation for fast training

    %subsequent frames: interpolate the model for gradual adaptation
    model_alphaf = (1 - interp_factor) * model_alphaf + interp_factor * alphaf;
    model_xf = (1 - interp_factor) * model_xf + interp_factor * xf;

    positions(frame,:) = pos;
    time = time + toc();

    %debug visualization of the response surface (kept OUTSIDE the tic/toc
    %section so display time does not inflate the reported FPS)
    figure(3)
    mesh(rs, cs, response)

    box = [pos([2,1]) - target_sz([2,1])/2, target_sz([2,1])];
    stop = update_visualization(frame, box);
    drawnow
    if stop, break, end  %user closed the video window
end
