import os
import sys
import subprocess
import re
import math
import numpy as np

#1. Preprocess
#read the raw data files (training A0-A9, testing B0-B9), center each
#character inside a 16x16 '.'-padded image, and write the centered images
#to a sibling "<name>.txt" file
#
# NOTE(review): Python 2 script -- check_output returns str here (bytes on
# py3) and "/" below is integer division for ints (float division on py3).

target_size = 16
# re-extract the dataset from scratch so stale output files never linger
os.system("rm -fr hw5_data; unzip hw5_data.zip > /dev/null")
output = subprocess.check_output("find ./hw5_data/*", shell=True)
datafiles = output.strip().split("\n")

for datafile in datafiles:
	# group raw lines into per-character records; a record starts at a
	# header line beginning with 'C' (e.g. "C h<height> w<width>")
	data = []
	fileptr = open(datafile)
	for line in fileptr:
		if re.match('^C', line):
			data.append([])
			data[-1].append(line)
		else:
			data[-1].append(line)
	fileptr.close()

	# write a centered target_size x target_size version of every record
	writefile = datafile + ".txt"
	wfileptr = open(writefile, 'w')
	for image in data:
		# image[0] is the header "C h<h> w<w>"; splitting on " h"/" w"
		# leaves the numeric fields in tokens[1] (h) and tokens[2] (w)
		tokens = re.split(' [hbw]', image[0])
		h = int(tokens[1])
		w = int(tokens[2])
#		assert (target_size >= h and target_size >= w)
		
		new_image = []
		# integer division: when the margin is odd, the extra '.' row goes
		# to the bottom and the extra '.' column to the right
		top = (target_size - h)/2
		bottom = target_size - h - top
		left = (target_size - w)/2
		right = target_size - w - left
		new_image.append("C" + " h" + str(target_size) + " w" + str(target_size) + "\n")
		# blank padding rows above, padded content rows, blank rows below
		for i in range(top):
			new_image.append("."*target_size + "\n")
		for i in range(len(image)-1):
			new_image.append("."*left + image[i+1].strip() + "."*right + "\n")
		for i in range(bottom):
			new_image.append("."*target_size + "\n")
		
		for i in range(len(new_image)):
			wfileptr.write(new_image[i])
	wfileptr.close()

#2. Extracting "Central Moment" image features
def load_from_data_files(datafiles):
	"""Parse preprocessed image files into nested lists of row strings.

	Each file holds records introduced by a header line starting with 'C';
	the header itself is dropped and the following pixel rows (stripped of
	whitespace) become one image.  Returns a list (one entry per file) of
	lists of images, each image being a list of row strings.
	"""
	all_data = []
	for path in datafiles:
		images = []
		with open(path) as fh:
			for raw in fh:
				if raw.startswith('C'):
					# header line: open a fresh image record
					images.append([])
				else:
					images[-1].append(raw.strip())
		all_data.append(images)
	return all_data

# collect the centered ".txt" files produced by the preprocessing step:
# A-* files are the training classes, B-* the testing classes.
# NOTE(review): the class label of each file is its position in the list
# returned by `find` -- presumably shell-sorted A-0..A-9 / B-0..B-9; confirm
# against the actual archive contents.
output = subprocess.check_output("find ./hw5_data/A-*.txt.txt", shell=True)
datafiles = output.strip().split("\n")
training_data = load_from_data_files(datafiles)

output = subprocess.check_output("find ./hw5_data/B-*.txt.txt", shell=True)
datafiles = output.strip().split("\n")
testing_data = load_from_data_files(datafiles)

def moment(p, q, image):
	"""Return the raw image moment m_pq of a '.'/'x' text image.

	Every 'x' pixel contributes (col+1)^p * (row+1)^q, i.e. coordinates
	are 1-based.  '.' pixels contribute nothing.
	"""
	total = 0.0
	for row_idx, row in enumerate(image):
		for col_idx, pixel in enumerate(row):
			if pixel == 'x':
				total += math.pow(col_idx + 1, p) * math.pow(row_idx + 1, q)
	return total
			
def central_moment(p, q, image):
	"""Return the central moment M_pq of a '.'/'x' text image.

	The centroid (xc, yc) is the mass center of the 'x' pixels in the same
	1-based coordinates used by moment().  Raises ZeroDivisionError when
	the image contains no 'x' pixels.
	"""
	m00 = moment(0, 0, image)	# pixel count; hoisted -- was computed twice
	xc = 1.0 * moment(1, 0, image) / m00
	yc = 1.0 * moment(0, 1, image) / m00
	Mpq = 0.0
	for y in range(len(image)):
		for x in range(len(image[y])):
			if image[y][x] == 'x':
				Mpq += math.pow(x + 1 - xc, p) * math.pow(y + 1 - yc, q)
	return Mpq
				
def calculate_moment_features(image):
	"""Return the 10-dimensional central-moment feature vector of an image.

	The features are the mixed central moments M_pq for the fixed order
	pairs (1,1),(2,1),(1,2),(3,1),(2,2),(1,3),(4,1),(3,2),(2,3),(1,4).
	"""
	orders = [(1, 1), (2, 1), (1, 2),
		  (3, 1), (2, 2), (1, 3),
		  (4, 1), (3, 2), (2, 3), (1, 4)]
	return [central_moment(p, q, image) for (p, q) in orders]

def calculate_pixel_features(image):
	"""Flatten a '.'/'x' text image into a row-major list of 0/1 ints.

	'x' pixels map to 1, everything else to 0; for a 16x16 image this is
	the 256-dimensional binary pixel feature vector.
	"""
	return [1 if pixel == 'x' else 0 for row in image for pixel in row]

def root_mean_square(feature_vector):
	"""Return the per-feature root-mean-square over all samples.

	feature_vector is a list of classes, each a list of per-image feature
	lists.  The result has one RMS value per feature dimension and is used
	to scale features to comparable magnitude.

	Fixes vs. the original: the feature count is taken from the data
	instead of a hard-coded 10, and the divisor is the true total sample
	count (the old len(fv)*len(fv[0]) silently assumed every class holds
	the same number of images; identical when they do).
	"""
	num_features = len(feature_vector[0][0])
	total_images = sum(len(class_samples) for class_samples in feature_vector)
	rms = []
	for e in range(num_features):
		t = 0.0
		for class_samples in feature_vector:
			for sample in class_samples:
				t += sample[e] * sample[e]
		rms.append(math.sqrt(t / total_images))
	return rms

def normalize(feature_vector, normalizing_vector):
	"""Scale every feature in-place by its root-mean-square value.

	If normalizing_vector is None the RMS vector is computed from
	feature_vector itself (training); otherwise the supplied vector is
	reused (testing), so both datasets share the same scale.  Returns the
	RMS vector that was applied.

	Fixes vs. the original: "is None" instead of "== None", the feature
	count comes from the RMS vector instead of a hard-coded 10, and the
	return statement is tab-indented like the rest of the file (it was
	space-indented, a TabError under Python 3).
	"""
	if normalizing_vector is None:
		rms = root_mean_square(feature_vector)
	else:
		rms = normalizing_vector

	for c in range(len(feature_vector)):
		for img in range(len(feature_vector[c])):
			for e in range(len(rms)):
				feature_vector[c][img][e] /= rms[e]
	return rms
				
def extract_features(data, feature_cal_func, whether_to_normalize=False, normalizing_vector=None):
	"""Map feature_cal_func over every image, optionally RMS-normalizing.

	data is a list of classes, each a list of images.  Returns a two-item
	list [feature_vector, rms]; rms is the normalizing vector actually
	used when whether_to_normalize is true, else None.
	"""
	feature_vector = [[feature_cal_func(img) for img in class_images]
			  for class_images in data]
	if whether_to_normalize:
		return [feature_vector, normalize(feature_vector, normalizing_vector)]
	return [feature_vector, None]

#3. train four classifiers
def cal_class_means(training_feature_vector):
	"""Return each class's mean feature vector as a 1xD numpy matrix.

	Accumulates the samples of every class sequentially and divides by the
	class's sample count.
	"""
	class_means = []
	for class_samples in training_feature_vector:
		total = np.matrix([0.0] * len(class_samples[0]))
		for sample in class_samples:
			total = total + np.matrix(sample)
		class_means.append(total / len(class_samples))
	return class_means

def cal_class_covariances(training_feature_vector):
	"""Return the sample covariance matrix (np.cov, ddof=1) of each class.

	Each class's samples are stacked into a features-by-samples array, the
	layout np.cov expects by default.
	"""
	covariances = []
	for class_samples in training_feature_vector:
		# rows = features, columns = samples
		stacked = np.array(class_samples, dtype=float).T
		covariances.append(np.cov(stacked))
	return covariances

def cal_class_each_feature_frequency(training_feature_vector):
	"""
		assuming conditionally-independent binary (0/1) features, estimate
		p_ij = P(x_i == 1 | w_j) as the per-class frequency of feature i,
		then smooth the degenerate 0.0/1.0 estimates strictly inside (0, 1)
		so later log-likelihoods never hit log(0)
	"""
	class_each_feature_frequences = []
	for class_samples in training_feature_vector:
		n = len(class_samples)
		counts = np.array([0.0] * len(class_samples[0]))
		for sample in class_samples:
			counts += np.array(sample)
		freqs = counts / n

		# smoothing: nudge exact 0/1 frequencies off the boundary
		low = 1.0 / (3 * n)
		high = (3.0 * n - 1) / (3.0 * n)
		for i in range(len(freqs)):
			if freqs[i] == 0.0:
				freqs[i] = low
			elif freqs[i] == 1.0:
				freqs[i] = high

		class_each_feature_frequences.append(freqs)
	return class_each_feature_frequences

def print_errors(title, errors):
	"print the errors nicely"
	# errors[c][i] = number of class-c samples classified as class i.
	# Emits a Markdown confusion table; the trailing commas on the Python 2
	# print statements suppress the newline so cells stay on one row.
	error_type_II = [0]*10		# per predicted class: samples wrongly drawn to it
	print "###CONFUSION TABLE"
	print "####" + title
	print "| True class/Classified as: \t|0\t|1\t|2\t|3\t|4\t|5\t|6\t|7\t|8\t|9\t|\tErrorTypeI\t|"
	print "| ------------------\t\t|---\t|---\t|---\t|---\t|---\t|---\t|---\t|---\t|---\t|---\t|-----------------\t|"
	for c in range(len(errors)):
		print "|\t\t" + str(c) + "\t\t|",
		# type-I errors for class c: its samples sent to some other class
		error_type_I = 0
		for i in range(len(errors[c])):
			if errors[c][i] != 0:
				print str(errors[c][i]) + "\t|",
				if i != c:
					# off-diagonal cell: a misclassification
					error_type_I += errors[c][i]
					error_type_II[i] += errors[c][i]
			else:
				print "\t|",
		print "\t" + str(error_type_I) + "\t\t|"

	# bottom summary row: type-II totals per column plus the grand total
	print "|\t\tErrorTypeII\t|",
	total = 0
	for i in range(len(error_type_II)):
		total += error_type_II[i]
		print str(error_type_II[i]) + "\t|",
	print "\t" + str(total) + "\t\t|"
	
#3.1 && 3.2: extract moment features of training data
# Normalize the training features and keep the RMS scaling vector so the
# classifiers can scale the test set identically.
training_feature_vector, normalizing_vector = extract_features(training_data, calculate_moment_features, True)

def classifier_1(title, testing_data):
	"moment space; assuming identity matrix; so classify samples to the class whose class mean is nearest to the sample"
	#training: with identity covariance the discriminant reduces to the
	#Euclidean nearest class mean; reads the module-level (moment-space)
	#training_feature_vector
	class_means = cal_class_means(training_feature_vector)

	#testing
	error_count = 0
	errors = [x[:] for x in [[0]*10]*10]	# confusion counts [true][predicted]
	# reuse the training RMS vector so test features share the same scale
	pair = extract_features(testing_data, calculate_moment_features, True, normalizing_vector)
	testing_feature_vector = pair[0]

	for c in range(len(testing_feature_vector)):
		for img in range(len(testing_feature_vector[c])):
			nearest = sys.maxint		# Python 2 only; use float('inf') on py3
			nearest_i = -1
			for i in range(len(class_means)):
				# Euclidean distance to class i's mean (a 1x10 np.matrix;
				# subtraction broadcasts against the 1-D sample array)
				dist = np.linalg.norm(np.array(testing_feature_vector[c][img]) - np.array(class_means[i]))
				if dist < nearest:
					nearest = dist
					nearest_i = i
			if nearest_i != c:
				error_count += 1
			errors[c][nearest_i] += 1
	print "error count: " + str(error_count) + "\n"
	print_errors(title, errors)

print "applying classifier 1 on training data set"
classifier_1("Method 1 - Ten Moments, Identity Covariance Matrices (trained on A, tested on A)", training_data)
print "applying classifier 1 on testing data set"
classifier_1("Method 1 - Ten Moments, Identity Covariance Matrices (trained on A, tested on B)", testing_data)

def classifier_2(title, testing_data):
	"moment space; assuming identical covariance matrices"
	#training: pool (average) the per-class covariances into one shared
	#matrix, then classify by Mahalanobis distance to each class mean
	class_means = cal_class_means(training_feature_vector)
	class_covariance_matrices = cal_class_covariances(training_feature_vector)
	
	avg_covariance_matrix = np.zeros((len(training_feature_vector[0][0]), len(training_feature_vector[0][0])))
	for i in range(len(class_covariance_matrices)):
		avg_covariance_matrix += class_covariance_matrices[i]
	avg_covariance_matrix /= len(class_covariance_matrices)
	
	#testing
	error_count = 0
	errors = [x[:] for x in [[0]*10]*10]	# confusion counts [true][predicted]
	# reuse the training RMS vector so test features share the same scale
	pair = extract_features(testing_data, calculate_moment_features, True, normalizing_vector)
	testing_feature_vector = pair[0]

	for c in range(len(testing_feature_vector)):
		for img in range(len(testing_feature_vector[c])):
			nearest = sys.maxint		# Python 2 only; use float('inf') on py3
			nearest_i = -1
			for i in range(len(class_means)):
				x = np.array(testing_feature_vector[c][img])
				mean_i = np.array(class_means[i])
				temp = x - mean_i
				# squared Mahalanobis distance (x-mu) S^-1 (x-mu)^T
				# NOTE(review): the inverse is recomputed every iteration --
				# it could be hoisted above all three loops
				dist = np.dot(np.dot(temp, np.linalg.inv(avg_covariance_matrix)),temp.T)
				if dist < nearest:
					nearest = dist
					nearest_i = i
			if nearest_i != c:
				error_count += 1
			errors[c][nearest_i] += 1

	print "error count: " + str(error_count) + "\n"
	print_errors(title, errors)
				
print "applying classifier 2 on training data set"
classifier_2("Method 2 - Ten Moments, Identical Covariance Matrices (trained on A, tested on A)", training_data)
print "applying classifier 2 on testing data set"
classifier_2("Method 2 - Ten Moments, Identical Covariance Matrices (trained on A, tested on B)", testing_data)

#3.3 && 3.4: extract pixel features of training data
# Switch the shared training features to the 256-dim binary pixel space;
# no normalization is applied (extract_features defaults to False).
training_feature_vector = extract_features(training_data, calculate_pixel_features)[0]

def classifier_3(title, testing_data):
	"pixel space; assuming identity matrix; so classify samples to the class whose class mean is nearest to the sample"
	#training: Euclidean nearest-mean, but over the raw binary pixel
	#features (reads the module-level pixel-space training_feature_vector)
	class_means = cal_class_means(training_feature_vector)

	#testing
	error_count = 0
	errors = [x[:] for x in [[0]*10]*10]	# confusion counts [true][predicted]
	testing_feature_vector = extract_features(testing_data, calculate_pixel_features)[0]
	for c in range(len(testing_feature_vector)):
		for img in range(len(testing_feature_vector[c])):
			nearest = sys.maxint		# Python 2 only; use float('inf') on py3
			nearest_i = -1
			for i in range(len(class_means)):
				# Euclidean distance to class i's mean pixel "template"
				dist = np.linalg.norm(np.array(testing_feature_vector[c][img]) - np.array(class_means[i]))
				if dist < nearest:
					nearest = dist
					nearest_i = i
			if nearest_i != c:
				error_count += 1
			errors[c][nearest_i] += 1

	print "error count: " + str(error_count) + "\n"
	print_errors(title, errors)
		
print "applying classifier 3 on training data set"
classifier_3("Method 3 - 256 Pixels, Identity Covariance Matrices (trained on A, tested on A)", training_data)
print "applying classifier 3 on testing data set"
classifier_3("Method 3 - 256 Pixels, Identity Covariance Matrices (trained on A, tested on B)", testing_data)

def classifier_4(title, testing_data):
	"pixel space; assuming class-conditionally independent features"
	#training: per-class Bernoulli pixel probabilities p_ij = P(x_i==1|w_j)
	#(smoothed away from 0/1, so the logs below never see log(0))
	class_each_feature_frequency = cal_class_each_feature_frequency(training_feature_vector)
	#testing
	error_count = 0
	errors = [x[:] for x in [[0]*10]*10]	# confusion counts [true][predicted]
	testing_feature_vector = extract_features(testing_data, calculate_pixel_features)[0]
	for c in range(len(testing_feature_vector)):
		for img in range(len(testing_feature_vector[c])):
			largest_g_j = -sys.maxint-1	# Python 2 int lower bound
			largest_j = -1
			for j in range(len(class_each_feature_frequency)):
				# log-likelihood of the binary pixel vector under class j:
				# sum_i x_i*log(p_ij) + (1-x_i)*log(1-p_ij)
				s = 0
				for i in range(len(class_each_feature_frequency[j])):
					s += testing_feature_vector[c][img][i]*math.log(class_each_feature_frequency[j][i]) + \
						(1-testing_feature_vector[c][img][i])*math.log(1-class_each_feature_frequency[j][i])
				if s > largest_g_j:
					largest_g_j = s
					largest_j = j
			if largest_j != c:
				error_count += 1
			errors[c][largest_j] += 1

	print "error count: " + str(error_count) + "\n"
	print_errors(title, errors)

print "applying classifier 4 on training data set"
classifier_4("Method 4 - 256 Pixels, Class-conditionally independent features (trained on A, tested on A)", training_data)
print "applying classifier 4 on testing data set"
classifier_4("Method 4 - 256 Pixels, Class-conditionally independent features (trained on A, tested on B)", testing_data)
