%r1:: notes from a previous run: num_uncertain = 100, scaling_a = 1.1, scaling_d = 1,
%num_important_features = 100

split_number = 1; %index of the cross-fold data split; used only in the output CSV file name
%load 'e:\Users\sai\data\allVariablesICMLCrossFoldReworkRatioSampledSplit1.mat'
%NOTE(review): the commented-out load above is presumably the source of the
%workspace variables used below (matrix_base_train, matrix_base_activepool,
%matrix_test, ids_modifiedTestMatrices, ids_test_claim, claim_mat) — confirm
%free some memory
clear claim_mat

%initialize variables
%each pool matrix: column 1 is the label, columns 2:end are the features
%(see the train()/predict() calls below, which split columns this way)
training_pool = matrix_base_train; %initial model training on this
active_pool = matrix_base_activepool; %active learning queries from here and adds to the training pool
testing_pool_all = matrix_test; %this pool is never altered (only used for testing)

%build a membership mask over the full test pool: entry (claim, split) is 1
%when that claim's id appears in the corresponding modified test split
testing_pool_found_at = zeros(size(testing_pool_all, 1), size(ids_modifiedTestMatrices, 3));

%one column per split: mark which test claims belong to split s
for s = 1:size(testing_pool_found_at, 2)
	testing_pool_found_at(:, s) = ismember(ids_test_claim, ids_modifiedTestMatrices(:, :, s));
end

testing_pool_size = size(ids_modifiedTestMatrices, 1); %number of claims in each modified test split

%parameters
batch_size = 1000; %number of uncertain claims to query at each active learning iteration
%NOTE(review): the two parameters below are only consumed by the commented-out
%feature-scaling code inside the main loop; they have no effect as written
num_important_features = 5; %maximum number of features to mark as important
important_feature_score_threshold = 0.9; %minimum score threshold to be considered important

%Baseline active-learning experiment.
%For each of the three modified test splits:
%  1. extract that split's rows from the full test pool,
%  2. reset the training/active pools,
%  3. run 10 active-learning rounds: record accuracy and precision@X% on the
%     test pool, move the batch of most-uncertain claims from the active pool
%     into the training pool, and retrain the model,
%  4. append one CSV row per round: [training-pool size, accuracy, precisions].
for test_split_number=1:3
	%select the rows of the full test pool that belong to this split
	n = 1:(size(testing_pool_all,1));
	sel = n(testing_pool_found_at(:,test_split_number)==1);
	testing_pool = testing_pool_all(sel, :);
	
	%reset the pools
	%FIX: start from a 0-row matrix with the correct column count instead of
	%[]; indexing a 0x0 empty with (:,1) — as train() below does — raises
	%"index exceeds array bounds" in MATLAB, while a 0xN empty indexes cleanly.
	%NOTE(review): if the first model was meant to be trained on real data,
	%this should be matrix_base_train (as in the pre-loop initialization).
	training_pool = zeros(0, size(matrix_base_activepool, 2)); %initial model training on this
	active_pool = matrix_base_activepool; %active learning queries from here and adds to the training pool
	num_uncertain = batch_size;

	%reference model trained on all available data; only its weight vector is
	%used, and only by the commented-out feature-scaling code further below
	final_model = train([training_pool(:,1);active_pool(:,1)], [training_pool(:,2:end);active_pool(:,2:end)]);
	feature_weight_repmat = repmat(final_model.w, num_uncertain, 1); %feature weight repeat matrix (easier for pairwise multiplication for getting feature scores)

	%parameters for storing results

	precision_at = [1 2 5]; %precision at X percent
	precision_size = ceil(precision_at*testing_pool_size/100); %top-k cutoff per percentage
	precision = zeros(1, length(precision_at));
	results = zeros(ceil(size(active_pool, 1)/num_uncertain), 2+length(precision));
	results_index = 1;
	fid = fopen(['baseline1000_split' num2str(split_number) '_test_split' num2str(test_split_number) '.csv'],'w');

	%train model on training pool
	model = train(training_pool(:,1), training_pool(:,2:end));

	%predict and find most uncertain: smallest |decision value| = closest to
	%the separating hyperplane
	[predicted, accuracy, estimates] = predict(active_pool(:,1), active_pool(:,2:end), model);
	[~,uncertainty_order] = sort(abs(estimates), 'ascend');
	most_uncertain_claims = (active_pool(uncertainty_order(1:num_uncertain, :), :));

	%CSV row format: integer pool size, then accuracy and each precision value;
	%drop the trailing comma and terminate the line
	out_format = ['%d,' repmat('%.6f,', 1, 1+length(precision))];
	out_format = [out_format(1:(length(out_format)-1)) '\n'];

	%while size(active_pool, 1) > 0
	for dummy=1:10
		%START-metric
		'recording' %progress echo to the console (no semicolon on purpose)
		%accuracy
		[predicted, accuracy, estimates] = predict(testing_pool(:,1), testing_pool(:,2:end), model);
		%precision
		estimates = estimates*model.Label(1); %estimate labels may not match properly (model may be reporting with 1 and -1 swapped)
		[~, sort_index] = sort(estimates, 'descend');
		%FIX: reorder rows explicitly; the original testing_pool(sort_index)
		%used linear indexing and only happened to return the column-1 labels,
		%which is the only part consumed below — values are unchanged
		testing_pool_reordered = testing_pool(sort_index, :);
		for p=1:length(precision)
			precision(p) = nnz(testing_pool_reordered(1:precision_size(p),1)>0)/precision_size(p);
		end
		%record result
		results(results_index, :) = [size(training_pool, 1) accuracy precision];
		fprintf(fid, out_format, results(results_index,:));
		results_index = results_index + 1;
		%END-metric
		
		%remove queried batch from active pool and add it to the training pool
		active_pool = active_pool(uncertainty_order((num_uncertain+1):end, :), :);
		training_pool = [training_pool; most_uncertain_claims];

		
		%select important features
		%NOTE(review): these scores are computed every round but unused — both
		%selection methods below are commented out
		claim_feature_scores = abs(most_uncertain_claims(:,2:end) .* feature_weight_repmat);
		%{
		%METHOD 1 - select features by finding maximum value for each feature
		%from the uncertain claims and then find important features from these
		claim_feature_scores_sorted = sort(claim_feature_scores, 'descend'); %sort within each column to fid maximum
		[top_claim_feature_scores_sorted, top_claim_feature_scores_index] = sort(claim_feature_scores_sorted(1,:), 'descend');
		%take out feature scores that are not higher than the feature score threshold
		top_claim_features_scores_index = top_claim_feature_scores(1:num_important_features);
		top_claim_feature_scores_sorted = top_claim_feature_scores_sorted(1:num_important_features);
		top_claim_features_scores_index = top_claim_features_scores_index(top_claim_feature_scores_sorted>=important_feature_score_threshold);
		
		%METHOD 2 - find the top features from each claim and then take the
		%union of all these important feature sets
		[claim_feature_scores_sorted, claim_feature_scores_index] = sort(claim_feature_scores, 2, 'descend');
		%get only top features
		claim_feature_scores_index = claim_feature_scores_index(:, 1:num_important_features);
		claim_feature_scores_sorted = claim_feature_scores_sorted(:, 1:num_important_features);
		%flatten matrix into vector
		claim_feature_scores_index = claim_feature_scores_index(:);
		claim_feature_scores_sorted = claim_feature_scores_sorted(:);
		claim_feature_scores_index = claim_feature_scores_index(claim_feature_scores_sorted>=important_feature_score_threshold);
		top_claim_features_scores_index = claim_feature_scores_index;
		%incorporate features into model
		scaling_a = 1; %scale up factor for important features
		scaling_d = 0; %scale down factor for other features
		scaling_vector = repmat(scaling_d, size(model.w));
		scaling_vector(top_claim_features_scores_index) = scaling_a;
		model.w = model.w .* scaling_vector;
		%}

		%select next instances for query; shrink the batch when the active
		%pool no longer holds a full batch
		if size(active_pool, 1) < num_uncertain
			num_uncertain = size(active_pool, 1);
			feature_weight_repmat = repmat(final_model.w, num_uncertain, 1);
		end
		[predicted, accuracy, estimates] = predict(active_pool(:,1), active_pool(:,2:end), model);
		[~,uncertainty_order] = sort(abs(estimates), 'ascend');
		most_uncertain_claims = (active_pool(uncertainty_order(1:num_uncertain, :), :));

		%incorporate instances into model (retrain)
		model = train(training_pool(:,1), training_pool(:,2:end));
	end

	fclose(fid);
end
