function retval = read_from_file (filename)
	#Read image samples from a text file.
	#Each sample starts with a header line containing 'C' plus "h<height>"
	#and "w<width>" tokens; the following <height> lines are the character
	#rows of the image.  Returns a cell array of char matrices, one per sample.
	#Raises an error when the file cannot be opened.
	retval = {};
	fid = fopen(filename, "r");
	if (fid == -1)
		error("read_from_file: cannot open file '%s'", filename);
	end
	while (-1 ~= (line = fgetl(fid)))
		match = regexp(line, 'C');
		if(match > 0)	#header line of a sample found
			info = strsplit(line);
			nh = regexp(info{1,2}, 'h(\d+)', 'tokens'){1,1}{1,1};	#get height of the pic
			nw = regexp(info{1,3}, 'w(\d+)', 'tokens'){1,1}{1,1};	#get width of the pic (parsed but currently unused)

			#collect the next nh lines as the rows of the image
			data = [];
			for k = 1:str2num(nh)
				line = fgetl(fid);
				data = [data;line];
			end
			retval{length(retval)+1}= data;
		end
	end
	fclose(fid);
endfunction

#compute the black area of an image
function black_area = compute_black_area (a_image)
	#Count the black pixels, i.e. the number of 'x' characters, in the
	#given character image (a char matrix).  Empty input yields 0.
	black_area = sum(a_image(:) == 'x');
endfunction

#compute the top heavy feature
function top_heavy = compute_top_heavy (a_image, topmost_rows)
	#Ratio (in percent, rounded to nearest integer) of black pixels in the
	#topmost rows of the image versus black pixels in the remaining rows.
	#a_image: char matrix; topmost_rows: number of rows counted as "top".
	top_heavy = 0;

	black_area = compute_black_area(a_image);

	top_area = 0;
	[nr, nc] = size(a_image);
	#clamp to nr so images shorter than topmost_rows don't index out of bounds
	for i = 1:min(topmost_rows, nr)
		for j = 1:nc
			if (a_image(i,j) == 'x')
				top_area++;
			end
		end
	end
	bottom_area = black_area - top_area;

	#NOTE(review): when bottom_area is 0 this yields Inf (or NaN if
	#top_area is also 0) — callers should ensure images have bottom pixels
	top_heavy = floor(((100.0*top_area) / bottom_area ) + 0.5);
endfunction

function [Tstar, Type_of_Rule] = build_classifier (training_data_files)
	#Train a one-feature threshold classifier separating 'c' images from
	#'e' images using the top-heavy feature (top 12 rows).
	#Returns the best threshold Tstar and the rule direction Type_of_Rule:
	#  'LessOrEqual'    -> decide 'c' when feature <= Tstar
	#  'GreaterOrEqual' -> decide 'c' when feature >= Tstar

	#initialize
	training_data = {};		#per-label containers of samples
	labels = {'_c', '_e'};		#filename substrings identifying each label
	features = {};			#feature values of all samples, per label

	for i = 1:length(labels)
		training_data{length(training_data) + 1} = {};	#this is for storing data of each label
		features{length(features) + 1} = [];
	end

	#read training data files
	#and put data read into their labeled container.
	for f = 1:length(training_data_files)
		for l = 1:length(labels)
			if (regexp(training_data_files{f}, labels{l}) > 0)	#this is a file containing samples of label labels{l}
				data = read_from_file(training_data_files{f});
				training_data{l}{length(training_data{l}) + 1} = data;
			end
		end
	end

	#compute the top-heavy feature of each image
	for l = 1:length(labels)
		for len = 1:length(training_data{l})
			for s = 1:length(training_data{l}{len})
				features{l}(length(features{l}) + 1) = compute_top_heavy (training_data{l}{len}{s}, 12);
			end
		end
	end

	#try to find a threshold T that minimizes the cost for the two rules:
	#  rule 1: if (feature <= T) decide 'c'; else decide 'e'
	#  rule 2: if (feature >= T) decide 'c'; else decide 'e'
	Mincost = intmax;
	for T = min(min(features{1}),min(features{2})) : max(max(features{1}),max(features{2}))
		cost1 = 0;	#misclassifications under rule 1
		cost2 = 0;	#misclassifications under rule 2

		for i = 1:length(features{1})
			if (features{1}(i) > T)
				cost1++;
			end
			if (features{1}(i) < T)
				cost2++;
			end
		end
		for i = 1:length(features{2})
			if (features{2}(i) <= T)
				cost1++;
			end
			if (features{2}(i) >= T)
				cost2++;
			end
		end

		if (cost1 < Mincost)
			Mincost = cost1;
			Tstar = T;
			Type_of_Rule = 'LessOrEqual';
		end
		if (cost2 < Mincost)
			Mincost = cost2;
			Tstar = T;
			Type_of_Rule = 'GreaterOrEqual';
		end
	end

	#use strcmp here: '==' on char arrays of different lengths
	#('LessOrEqual' vs 'GreaterOrEqual') is a nonconformant-operands error
	if (strcmp(Type_of_Rule, 'LessOrEqual'))
		printf("The best decision rule found: if (top-heavy <= %i) decide 'c'; else decide 'e'\n", Tstar);
	else
		printf("The best decision rule found: if (top-heavy >= %i) decide 'c'; else decide 'e'\n", Tstar);
	end

	printf("the threshold = %i\n", Tstar);
	printf("Error Rate on training data set: %.4f\n", Mincost/(length(features{1}) + length(features{2})));
endfunction

#Test other data in the test set
function Error_rate = test_classifier(testing_data_files, T, Type_of_Rule)
	#Apply a trained threshold rule to a labeled test set and return the
	#error rate (fraction of misclassified samples in [0, 1]).
	#T: threshold; Type_of_Rule: 'LessOrEqual' or 'GreaterOrEqual'.
	#Raises an error for an unknown Type_of_Rule.

	#initialize
	testing_data = {};		#per-label containers of samples
	labels = {'_c', '_e'};		#filename substrings identifying each label
	features = {};			#feature values of all samples, per label

	for i = 1:length(labels)
		testing_data{length(testing_data) + 1} = {};	#this is for storing data of each label
		features{length(features) + 1} = [];
	end

	#read testing data files
	#and put data read into their labeled container.
	for f = 1:length(testing_data_files)
		for l = 1:length(labels)
			if (regexp(testing_data_files{f}, labels{l}) > 0)	#this is a file containing samples of label labels{l}
				data = read_from_file(testing_data_files{f});
				testing_data{l}{length(testing_data{l}) + 1} = data;
			end
		end
	end

	#compute the feature for each image
	for l = 1:length(labels)
		for len = 1:length(testing_data{l})
			for s = 1:length(testing_data{l}{len})
				features{l}(length(features{l}) + 1) = compute_top_heavy (testing_data{l}{len}{s}, 12);
			end
		end
	end

	#count misclassifications under the given rule
	Error_rate = 0;
	switch (Type_of_Rule)
		case 'LessOrEqual'	#if (feature <= T) decide 'c'; else decide 'e'
			for i = 1:length(features{1})
				if (features{1}(i) > T)
					Error_rate++;
				end
			end
			for i = 1:length(features{2})
				if (features{2}(i) <= T)
					Error_rate++;
				end
			end
		case 'GreaterOrEqual'	#if (feature >= T) decide 'c'; else decide 'e'
			for i = 1:length(features{1})
				if (features{1}(i) < T)
					Error_rate++;
				end
			end
			for i = 1:length(features{2})
				if (features{2}(i) >= T)
					Error_rate++;
				end
			end
		otherwise
			#fail loudly instead of silently reporting a 0% error rate
			error("test_classifier: unknown Type_of_Rule '%s'", Type_of_Rule);
	endswitch
	Error_rate /= (length(features{1}) + length(features{2}));
endfunction

#TASK A
#Train on pair 1, then evaluate the rule on the remaining nine file pairs.
printf("Task A:\n");
training_data_files = {"data/HW2_data_c_1.txt", "data/HW2_data_e_1.txt"};
[T, Type_of_Rule] = build_classifier(training_data_files);
error_rates = [];
for pair = 2:10
	testing_data_files = {sprintf("data/HW2_data_c_%d.txt", pair), ...
	                      sprintf("data/HW2_data_e_%d.txt", pair)};
	error_rates(end+1) = test_classifier(testing_data_files, T, Type_of_Rule);
end
printf("error rates:\n")
#printf recycles the format over the vector, one field per element
printf("\t%.4f", error_rates);
printf("\n");

printf("\n\tmean = %.4f", mean(error_rates));
printf("\n\tstandard deviation = %.4f\n\n", std(error_rates));

#TASK B
#Merge pairs 1-5 into a training file and 6-10 into a testing file for
#each label, then train on the former and evaluate on the latter.
printf("Task B:\n");
for lbl = {"c", "e"}
	c = lbl{1};
	lo = strjoin(arrayfun(@(k) sprintf("data/HW2_data_%s_%d.txt", c, k), 1:5, "UniformOutput", false), " ");
	hi = strjoin(arrayfun(@(k) sprintf("data/HW2_data_%s_%d.txt", c, k), 6:10, "UniformOutput", false), " ");
	system(sprintf("cat %s > data/HW2_data_%s_1-5.txt", lo, c));
	system(sprintf("cat %s > data/HW2_data_%s_6-10.txt", hi, c));
end

training_data_files = {"data/HW2_data_c_1-5.txt", "data/HW2_data_e_1-5.txt"};
[T, Type_of_Rule] = build_classifier(training_data_files);
testing_data_files = {"data/HW2_data_c_6-10.txt", "data/HW2_data_e_6-10.txt"};
error_rate = test_classifier(testing_data_files, T, Type_of_Rule);
printf("\n\terror_rate = %.4f", error_rate);
printf("\n");

#TASK C
#Leave-one-pair-out cross-validation over the ten file pairs.
error_rates = [];
for heldout = 1:10	#the pair that will be used in testing
	testing_data_files = {sprintf("data/HW2_data_c_%d.txt", heldout), ...
	                      sprintf("data/HW2_data_e_%d.txt", heldout)};
	training_data_files = {};
	for i = setdiff(1:10, heldout)
		training_data_files{end+1} = sprintf("data/HW2_data_c_%d.txt", i);
		training_data_files{end+1} = sprintf("data/HW2_data_e_%d.txt", i);
	end

	[T, Type_of_Rule] = build_classifier(training_data_files);
	error_rates(heldout) = test_classifier(testing_data_files, T, Type_of_Rule);
end
printf("error rates:\n")
#printf recycles the format over the vector, one field per element
printf("\t%.4f", error_rates);
printf("\n");
printf("\n\tmean = %.4f", mean(error_rates));
printf("\n\tstandard deviation = %.4f\n\n", std(error_rates));
