import os
import sys
import subprocess
import re
import math
import numpy as np
import random
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from sklearn import svm

#1. Preprocess
#read data files (training + testing) A0-A9 B0-B9
#place each character in the center of a 16X16 image
#and generating corresponding new image in the new file

target_size = 16

# Unpack the raw dataset (the tarball extracts as C-V; rename it to C-IV)
# and list every raw data file in it.
os.system("rm -fr C-IV; tar xf C-IV.tar > /dev/null; mv C-V C-IV")
output = subprocess.check_output("find ./C-IV/*", shell=True)
datafiles = output.strip().split("\n")


for datafile in datafiles:
	# Split the raw file into images: a header line starting with 'C'
	# ("C h<height> w<width>") opens a new image, following lines are rows.
	data = []
	fileptr = open(datafile)
	for line in fileptr:
		if re.match('^C', line):
			data.append([])
		data[-1].append(line)
	fileptr.close()

	# Re-emit every image centered in a target_size x target_size frame:
	# rows/columns are padded with '.' when the image is smaller and
	# trimmed symmetrically when it is larger.
	writefile = datafile + ".txt"
	wfileptr = open(writefile, 'w')
	for image in data:
		tokens = re.split(' [hbw]', image[0])
		h = int(tokens[1])
		w = int(tokens[2])

		rows = [row.strip() for row in image[1:]]

		# Height: keep the middle target_size rows, or remember padding.
		if h > target_size:
			top_trim = (h - target_size) // 2
			rows = rows[top_trim:top_trim + target_size]
			top = bottom = 0
		else:
			top = (target_size - h) // 2
			bottom = target_size - h - top

		# Width: trim each row symmetrically, or pad it with '.'.
		if w > target_size:
			left_trim = (w - target_size) // 2
			right_trim = w - target_size - left_trim
			rows = [row[left_trim:-right_trim] for row in rows]
		else:
			left = (target_size - w) // 2
			right = target_size - w - left
			rows = ["." * left + row + "." * right for row in rows]

		new_image = ["C" + " h" + str(target_size) + " w" + str(target_size) + "\n"]
		for i in range(top):
			new_image.append("." * target_size + "\n")
		for row in rows:
			new_image.append(row + "\n")
		for i in range(bottom):
			new_image.append("." * target_size + "\n")

		for out_line in new_image:
			wfileptr.write(out_line)
	wfileptr.close()

	
#2. Extracting "Central Moment" image features
def load_from_data_files(datafiles):
	"""Load preprocessed image files.

	Each file contains a sequence of images: a header line starting with 'C'
	opens a new image; every other line is one (stripped) pixel row.
	Returns a list with one entry per file, each entry a list of images,
	each image a list of row strings. Uses `with` so the file handle is
	closed even if parsing raises.
	"""
	all_data = []
	for datafile in datafiles:
		data = []
		with open(datafile) as fileptr:
			for line in fileptr:
				if re.match('^C', line):
					data.append([])	# header: start a new image
				else:
					data[-1].append(line.strip())
		all_data.append(data)
	return all_data


#create class names
def _find_datafiles(prefix):
	"""Return the preprocessed .txt.txt files for dataset *prefix* (A/B/C/D)."""
	found = subprocess.check_output("find ./C-IV/" + prefix + "-*.txt.txt", shell=True)
	return found.strip().split("\n")

#create class names from the training file names: "<set>-<class>.txt.txt"
classNames = []
for datafile in _find_datafiles("A"):
	classNames.append(re.split('[-\.]', re.split('/', datafile)[-1])[1])

#load training data A and the three test sets B, C, D
training_data = load_from_data_files(_find_datafiles("A"))
testing_data_B = load_from_data_files(_find_datafiles("B"))
testing_data_C = load_from_data_files(_find_datafiles("C"))
testing_data_D = load_from_data_files(_find_datafiles("D"))

def moment(p, q, image):
	"""Return the raw moment m_pq of a character image.

	*image* is a list of row strings where 'x' marks an ink pixel.
	Pixel (x, y) contributes (x+1)^p * (y+1)^q (1-based coordinates).
	"""
	total = 0.0
	for row_idx, row in enumerate(image):
		for col_idx, cell in enumerate(row):
			if cell == 'x':
				total += ((col_idx + 1) ** p) * ((row_idx + 1) ** q)
	return total
			
def central_moment(p, q, image):
	"""Return the central moment M_pq of *image* about its ink centroid.

	'x' cells count as ink at 1-based coordinates. Raises
	ZeroDivisionError for an image without any ink pixel (m00 == 0).
	"""
	m00 = moment(0, 0, image)
	xc = 1.0 * moment(1, 0, image) / m00
	yc = 1.0 * moment(0, 1, image) / m00
	result = 0.0
	for row_idx, row in enumerate(image):
		for col_idx, cell in enumerate(row):
			if cell == 'x':
				result += math.pow(col_idx + 1 - xc, p) * math.pow(row_idx + 1 - yc, q)
	return result
				
def calculate_moment_features(image, pattern = None):
	"""Return the 20 central moments used as the standard feature vector.

	The orders (p, q) cover p+q = 2..7 with p, q >= 1, p descending within
	each total order; (1, 6) is omitted to keep exactly 20 features.
	*pattern* exists only so this function matches the pixel extractor's
	signature and is ignored.
	"""
	orders = [
		(1, 1),
		(2, 1), (1, 2),
		(3, 1), (2, 2), (1, 3),
		(4, 1), (3, 2), (2, 3), (1, 4),
		(5, 1), (4, 2), (3, 3), (2, 4), (1, 5),
		(6, 1), (5, 2), (4, 3), (3, 4), (2, 5),
	]
	return [central_moment(p, q, image) for (p, q) in orders]

def calculate_moment_features_for_classifier_2(image, pattern = None):
	"""Return 57 features: the 21 raw moments with p+q <= 5 followed by the
	36 central moments with p+q = 2..9 (p, q >= 1).

	*pattern* is ignored; it only mirrors the pixel extractor's signature.
	"""
	moment_orders = [
		(0, 0),
		(0, 1), (1, 0),
		(0, 2), (1, 1), (2, 0),
		(0, 3), (1, 2), (2, 1), (3, 0),
		(0, 4), (1, 3), (2, 2), (3, 1), (4, 0),
		(0, 5), (1, 4), (2, 3), (3, 2), (4, 1), (5, 0),
	]
	central_orders = [
		(1, 1),
		(2, 1), (1, 2),
		(3, 1), (2, 2), (1, 3),
		(4, 1), (3, 2), (2, 3), (1, 4),
		(5, 1), (4, 2), (3, 3), (2, 4), (1, 5),
		(6, 1), (5, 2), (4, 3), (3, 4), (2, 5), (1, 6),
		(7, 1), (6, 2), (5, 3), (4, 4), (3, 5), (2, 6), (1, 7),
		(8, 1), (7, 2), (6, 3), (5, 4), (4, 5), (3, 6), (2, 7), (1, 8),
	]
	features = [moment(p, q, image) for (p, q) in moment_orders]
	features += [central_moment(p, q, image) for (p, q) in central_orders]
	return features
		
def init_pixel_feature_pattern(mode):
	"""Build a target_size x target_size mask selecting which pixels to use.

	A cell value of 1 means "use this pixel" (see calculate_pixel_features).
	"ALL_PIXELS" sets every cell to 1; "RANDOM_PIXELS" draws each cell
	uniformly from 1..16, so roughly 1/16 of the pixels are selected.
	Any other mode yields an empty pattern.
	"""
	if mode == "ALL_PIXELS":
		return [[1] * target_size for _row in range(target_size)]
	if mode == "RANDOM_PIXELS":
		return [[random.randint(1, 16) for _col in range(target_size)]
			for _row in range(target_size)]
	return []

def calculate_pixel_features(image, pattern):
	"""Return a binary feature vector of the pixels selected by *pattern*.

	For every cell where pattern[y][x] == 1, appends 1 if the image pixel
	is ink ('x') and 0 otherwise; other cells are skipped entirely.
	"""
	features = []
	for y, row in enumerate(image):
		for x, cell in enumerate(row):
			if pattern[y][x] == 1:
				features.append(1 if cell == 'x' else 0)
	return features

def root_mean_square(feature_vector):
	"""Return the per-feature RMS over every image in every class.

	*feature_vector* is indexed [class][image][feature]. The original code
	hard-coded 10 features, silently ignoring the rest of the 20-element
	moment vectors; use the actual vector length instead.
	"""
	num_features = len(feature_vector[0][0])
	num_images = len(feature_vector) * len(feature_vector[0])
	rms = []
	for e in range(num_features):
		t = 0.0
		for c in range(len(feature_vector)):
			for img in range(len(feature_vector[c])):
				t += feature_vector[c][img][e] * feature_vector[c][img][e]
		rms.append(math.sqrt(t / num_images))
	return rms

def normalize(feature_vector, normalizing_vector):
	"""Divide every feature in place by its RMS; return the RMS vector used.

	When *normalizing_vector* is None the RMS is computed from
	*feature_vector* itself (training); otherwise the given vector is used
	so test data is scaled exactly like the training data. The original
	hard-coded 10 features; iterate over the whole RMS vector instead.
	"""
	if normalizing_vector is None:
		rms = root_mean_square(feature_vector)
	else:
		rms = normalizing_vector

	for c in range(len(feature_vector)):
		for img in range(len(feature_vector[c])):
			for e in range(len(rms)):
				feature_vector[c][img][e] /= rms[e]
	return rms
				
def extract_features(data, feature_cal_func, pattern = None, whether_to_normalize=False, normalizing_vector=None):
	feature_vector = []
	for f in range(len(data)):
		feature_vector.append([])
		for img in range(len(data[f])):
			feature_vector[-1].append(feature_cal_func(data[f][img], pattern))
	if whether_to_normalize:
		nv = normalize(feature_vector, normalizing_vector)
		return [feature_vector, nv]
	return [feature_vector, None]

#3. train four classifiers
def cal_class_means(training_feature_vector):
	"""Return one 1xD np.matrix per class: the mean feature vector."""
	class_means = []
	for class_vectors in training_feature_vector:
		total = np.matrix([0.0] * len(class_vectors[0]))
		for vec in class_vectors:
			total = total + np.matrix(vec)
		class_means.append(total / len(class_vectors))
	return class_means

def cal_class_covariances(training_feature_vector):
	"""Return one DxD covariance matrix per class.

	Rows of the np.cov input are features (observations in columns), so the
	result is the DxD sample covariance of that class's feature vectors.
	"""
	matrices = []
	for class_vectors in training_feature_vector:
		samples = np.array(class_vectors, dtype=float)	# (num_images, D)
		matrices.append(np.cov(samples.T))
	return matrices

def cal_class_each_feature_frequency(training_feature_vector):
	"""
		assuming conditionally-independent features and each feature only has val 0 or 1;
		this function calculates p_ij defined as P(x_i == 1 | w_j), smoothed
		away from exactly 0 or 1 so a logarithm can be taken later
	"""
	frequencies = []
	for class_vectors in training_feature_vector:
		n = len(class_vectors)
		p = np.array([0.0] * len(class_vectors[0]))
		for vec in class_vectors:
			p = p + np.array(vec)
		p = p / n

		# smoothing: clamp 0 -> 1/(3n) and 1 -> (3n-1)/(3n)
		low = 1.0 / (3 * n)
		high = (3.0 * n - 1) / (3.0 * n)
		for i in range(len(p)):
			if p[i] == 0.0:
				p[i] = low
			elif p[i] == 1.0:
				p[i] = high

		frequencies.append(p)
	return frequencies

def print_errors(title, error_count, errors):
	"""Print a markdown confusion table for one classifier run.

	errors[true][predicted] holds counts. Per row it prints the type-I
	errors (off-diagonal row sum: images of that class misclassified);
	the final row prints per-column type-II errors (images wrongly
	classified AS that class) and their total. Uses the module-global
	classNames for the header labels.
	"""
	error_type_II = [0]*10
	print "###CONFUSION TABLE"
	print "####" + title + "Error count: " + str(error_count)
	# header row: one column per class plus the type-I column
	print "| True class/Classified as: \t", 
	for c in range(len(classNames)):
		print "|" + classNames[c] + "\t",
        print "|ErrorTypeI\t|"

	print "| ------------------\t\t|---\t|---\t|---\t|---\t|---\t|---\t|---\t|---\t|---\t|---\t|-------------\t|"
	for c in range(len(errors)):
		print "|\t\t" + classNames[c] + "\t\t|",
		error_type_I = 0
		for i in range(len(errors[c])):
			if errors[c][i] != 0:
				print str(errors[c][i]) + "\t|",
				if i != c:
					# off-diagonal cell: count toward both error types
					error_type_I += errors[c][i]
					error_type_II[i] += errors[c][i]
			else:
				print "\t|",	# zero cells are left blank
		print "\t" + str(error_type_I) + "\t|"

	# footer: per-class type-II errors and their grand total
	print "|\t\tErrorTypeII\t|",
	total = 0
	for i in range(len(error_type_II)):
		total += error_type_II[i]
		print str(error_type_II[i]) + "\t|",
	print "\t" + str(total) + "\t|"

def l2_distance(x, y):
	"""Euclidean (L2) distance between two equal-length vectors."""
	total = 0
	for i in range(len(x)):
		diff = x[i] - y[i]
		total += diff * diff
	return np.sqrt(total)

def l4_distance(x, y):
	"""L4 (Minkowski order-4) distance between two equal-length vectors."""
	total = 0
	for i in range(len(x)):
		squared = (x[i] - y[i]) * (x[i] - y[i])
		total += squared * squared
	# fourth root == sqrt applied twice
	return np.sqrt(np.sqrt(total))

def swap(array, a, b):
	"""Exchange the elements at indices a and b of *array* in place."""
	array[a], array[b] = array[b], array[a]

#3.1 && 3.2: extract moment features of training data
#Computational cost improvement for each classifier

def classifier_1(title, testing_data, does_print_error = True):
	"""Method 1: minimum-distance classifier in moment space.

	Assumes identity covariance matrices, i.e. assigns each test image to
	the class whose mean (normalized) moment vector is nearest in
	Euclidean distance. Returns a 10x100 matrix of predicted class
	indices. Re-indented with tabs only: the original mixed tabs and
	8-space indentation, which is an error under `python -tt`/Python 3.
	"""
	#training: normalized moment features and per-class means
	pair = extract_features(training_data, calculate_moment_features, None, True)
	training_feature_vector = pair[0]
	normalizing_vector = pair[1]
	class_means = cal_class_means(training_feature_vector)

	#testing: scale test features with the SAME rms vector as training
	error_count = 0
	errors = [x[:] for x in [[0]*10]*10]
	classifiedAs = [x[:] for x in [[-1]*100]*10]
	testing_feature_vector = extract_features(testing_data, calculate_moment_features, None, True, normalizing_vector)[0]

	for c in range(len(testing_feature_vector)):
		for img in range(len(testing_feature_vector[c])):
			nearest = sys.maxint
			nearest_i = -1
			for i in range(len(class_means)):
				dist = np.linalg.norm(np.array(testing_feature_vector[c][img]) - np.array(class_means[i]))
				if dist < nearest:
					nearest = dist
					nearest_i = i
			if nearest_i != c:
				error_count += 1
			errors[c][nearest_i] += 1
			classifiedAs[c][img] = nearest_i

	if does_print_error == True:
		print_errors(title, error_count, errors)
	return classifiedAs

# Evaluate Method 1 on the training set and every test set.
for _title, _dataset in [
	("Method 1 - Ten Moments, Identity Covariance Matrices (trained on A, tested on A)", training_data),
	("Method 1 - Ten Moments, Identity Covariance Matrices (trained on A, tested on B)", testing_data_B),
	("Method 1 - Ten Moments, Identity Covariance Matrices (trained on A, tested on C)", testing_data_C),
	("Method 1 - Ten Moments, Identity Covariance Matrices (trained on A, tested on D)", testing_data_D)]:
	classifier_1(_title, _dataset)

def classifier_2(title, testing_data, does_print_error = True):
	"""Method 2: Mahalanobis-distance classifier in moment space.

	Assumes all classes share one covariance matrix (the average of the
	per-class covariances) and assigns each test image to the class whose
	mean minimizes the squared Mahalanobis distance. Returns a 10x100
	matrix of predicted class indices. Re-indented with tabs only (the
	original mixed tabs and 8-space indentation) and the pooled covariance
	is inverted once instead of once per (sample, class) pair.
	"""
	#training: normalized moment features, class means, pooled covariance
	pair = extract_features(training_data, calculate_moment_features, None, True)
	training_feature_vector = pair[0]
	normalizing_vector = pair[1]

	class_means = cal_class_means(training_feature_vector)
	class_covariance_matrices = cal_class_covariances(training_feature_vector)
	avg_covariance_matrix = np.zeros((len(training_feature_vector[0][0]), len(training_feature_vector[0][0])))
	for i in range(len(class_covariance_matrices)):
		avg_covariance_matrix += class_covariance_matrices[i]
	avg_covariance_matrix /= len(class_covariance_matrices)
	#hoisted: the inverse is loop-invariant
	inv_avg_covariance = np.linalg.inv(avg_covariance_matrix)

	#testing
	error_count = 0
	errors = [x[:] for x in [[0]*10]*10]
	classifiedAs = [x[:] for x in [[-1]*100]*10]
	testing_feature_vector = extract_features(testing_data, calculate_moment_features, None, True, normalizing_vector)[0]

	for c in range(len(testing_feature_vector)):
		for img in range(len(testing_feature_vector[c])):
			nearest = sys.maxint
			nearest_i = -1
			x = np.array(testing_feature_vector[c][img])
			for i in range(len(class_means)):
				temp = x - np.array(class_means[i])
				#squared Mahalanobis distance under the pooled covariance
				dist = np.dot(np.dot(temp, inv_avg_covariance), temp.T)
				if dist < nearest:
					nearest = dist
					nearest_i = i
			if nearest_i != c:
				error_count += 1
			errors[c][nearest_i] += 1
			classifiedAs[c][img] = nearest_i

	if does_print_error == True:
		print_errors(title, error_count, errors)
	return classifiedAs

# Evaluate Method 2 on the training set and every test set.
for _title, _dataset in [
	("Method 2 - Ten Moments, Identical Covariance Matrices (trained on A, tested on A)", training_data),
	("Method 2 - Ten Moments, Identical Covariance Matrices (trained on A, tested on B)", testing_data_B),
	("Method 2 - Ten Moments, Identical Covariance Matrices (trained on A, tested on C)", testing_data_C),
	("Method 2 - Ten Moments, Identical Covariance Matrices (trained on A, tested on D)", testing_data_D)]:
	classifier_2(_title, _dataset)


def classifier_3_improved(title, testing_data, does_print_error = True):
	"""Method 3: 1-NN in moment space under the (squared) L2 metric.

	Trains on the module-global training_data; each test image gets the
	class of its nearest training image. The distance accumulation exits
	early once it exceeds the best distance so far, and exact-distance
	ties are broken uniformly at random. Returns a 10x100 matrix of
	predicted class indices.
	"""
	##training: normalized moment features; keep the rms vector
	pair = extract_features(training_data, calculate_moment_features, None, True)
	training_feature_vector = pair[0]
	normalizing_vector = pair[1]
	
	##testing: test features are scaled with the SAME rms as training
	error_count = 0
	errors = [x[:] for x in [[0]*10]*10]
	classifiedAs = [x[:] for x in [[-1]*100]*10]
	testing_feature_vector = extract_features(testing_data, calculate_moment_features, None, True, normalizing_vector)[0]

	for c in range(len(testing_feature_vector)):
		for img in range(len(testing_feature_vector[c])):
			nearest = sys.maxint
			nearest_i = []	# classes of every training image tied at the minimum
			for ct in range(len(training_feature_vector)):
				for imgt in range(len(training_feature_vector[ct])):
					dist = 0
					a = testing_feature_vector[c][img]
					b = training_feature_vector[ct][imgt]
					# accumulate squared L2 distance with an early exit
					# once it exceeds the current best
					for i in range(len(training_feature_vector[ct][imgt])):
						dist += (a[i]-b[i])*(a[i]-b[i])
						if dist > nearest:
							break
					if dist > nearest:
						continue
					if dist < nearest:
						nearest = dist
						nearest_i = [ct]	# strictly better: restart tie list
					else:
						nearest_i.append(ct)	# exact tie: remember class too

			# break ties uniformly at random among the tied classes
			decision_class = nearest_i[random.randint(0, len(nearest_i)-1)]
			if decision_class != c:
				error_count += 1
			errors[c][decision_class] += 1
			classifiedAs[c][img] = decision_class

	if does_print_error == True:
		print_errors(title, error_count, errors)
	return classifiedAs

# Evaluate Method 3 on the training set and every test set.
for _title, _dataset in [
	("Method 3: 1-NN in moment space under L2 metric (trained on A, tested on A)", training_data),
	("Method 3: 1-NN in moment space under L2 metric (trained on A, tested on B)", testing_data_B),
	("Method 3: 1-NN in moment space under L2 metric (trained on A, tested on C)", testing_data_C),
	("Method 3: 1-NN in moment space under L2 metric (trained on A, tested on D)", testing_data_D)]:
	classifier_3_improved(_title, _dataset)

def classifier_4_improved(title, testing_data, does_print_error = True):
	"""Method 4: 5-NN in moment space under the (squared) L2 metric.

	For each test image, keeps the five nearest training images and takes
	a majority vote of their classes; distance ties during insertion and
	vote ties at the end are both broken at random. Returns a 10x100
	matrix of predicted class indices.
	"""
	##training
	pair = extract_features(training_data, calculate_moment_features, None, True)
	training_feature_vector = pair[0]
	normalizing_vector = pair[1]
	
	##testing
	error_count = 0
	errors = [x[:] for x in [[0]*10]*10]
	classifiedAs = [x[:] for x in [[-1]*100]*10]
	testing_feature_vector = extract_features(testing_data, calculate_moment_features, None, True, normalizing_vector)[0]

	for c in range(len(testing_feature_vector)):
		for img in range(len(testing_feature_vector[c])):
			# nearests[0..4] hold the 5 best distances in ascending order;
			# slot 5 is scratch space for the candidate under consideration
			nearests = [sys.maxint]*6
			nearest_is = [-1]*6
			for ct in range(len(training_feature_vector)):
				for imgt in range(len(training_feature_vector[ct])):
					nearests[5] = 0
					a = testing_feature_vector[c][img]
					b = training_feature_vector[ct][imgt]
					# accumulate squared L2 distance, bailing out early once
					# it exceeds the current 5th-best distance
					for i in range(len(training_feature_vector[ct][imgt])):
						nearests[5] += (a[i] - b[i]) * (a[i] - b[i])
						if nearests[5] > nearests[4]:
							break
					if nearests[5] > nearests[4]:
						continue

					# insertion-sort the candidate into the top-5 list;
					# equal distances are swapped with probability 0.5
					nearest_is[5] = ct
					i = 5
					while i >= 1:
						if nearests[i] < nearests[i-1] or nearests[i] == nearests[i-1] and random.random() > 0.5:
							swap(nearests, i, i-1)
							swap(nearest_is, i, i-1)
							i -= 1
						else:
							break
			# majority vote over the classes of the 5 nearest neighbours
			classes = [0]*10
			for r in range(len(nearest_is)-1):
				classes[nearest_is[r]] += 1

			m = 0
			mc = []	# all classes sharing the highest vote count
			for r in range(len(nearest_is)-1):
				if classes[nearest_is[r]] > m:
					m = classes[nearest_is[r]]
					mc = [nearest_is[r]]
				elif classes[nearest_is[r]] == m:
					mc.append(nearest_is[r])
			
			#randomly select one class as the decision
			decision_class = mc[random.randint(0, len(mc)-1)]
			if decision_class != c:
				error_count += 1
			errors[c][decision_class] += 1
			classifiedAs[c][img] = decision_class

	if does_print_error == True:
		print_errors(title, error_count, errors)
	return classifiedAs
	
# Evaluate Method 4 on the training set and every test set.
for _title, _dataset in [
	("Method 4: 5-NN in moment space under L2 metric (trained on A, tested on A)", training_data),
	("Method 4: 5-NN in moment space under L2 metric (trained on A, tested on B)", testing_data_B),
	("Method 4: 5-NN in moment space under L2 metric (trained on A, tested on C)", testing_data_C),
	("Method 4: 5-NN in moment space under L2 metric (trained on A, tested on D)", testing_data_D)]:
	classifier_4_improved(_title, _dataset)

def classifier_5_improved(title, testing_data, does_print_error = True):
	"""Method 5: Mahalanobis classifier on the 57-element raw+central
	moment feature vector, assuming identical (pooled) covariance
	matrices. Returns a 10x100 matrix of predicted class indices.
	Re-indented with tabs only (the original mixed tabs and 8-space
	indentation) and the pooled covariance is inverted once instead of
	once per (sample, class) pair.
	"""
	#training: combined raw+central moment features
	pair = extract_features(training_data, calculate_moment_features_for_classifier_2, None, True)
	training_feature_vector = pair[0]
	normalizing_vector = pair[1]

	class_means = cal_class_means(training_feature_vector)
	class_covariance_matrices = cal_class_covariances(training_feature_vector)
	avg_covariance_matrix = np.zeros((len(training_feature_vector[0][0]), len(training_feature_vector[0][0])))
	for i in range(len(class_covariance_matrices)):
		avg_covariance_matrix += class_covariance_matrices[i]
	avg_covariance_matrix /= len(class_covariance_matrices)
	#hoisted: the inverse is loop-invariant
	inv_avg_covariance = np.linalg.inv(avg_covariance_matrix)

	#testing
	error_count = 0
	errors = [x[:] for x in [[0]*10]*10]
	classifiedAs = [x[:] for x in [[-1]*100]*10]
	testing_feature_vector = extract_features(testing_data, calculate_moment_features_for_classifier_2, None, True, normalizing_vector)[0]

	for c in range(len(testing_feature_vector)):
		for img in range(len(testing_feature_vector[c])):
			nearest = sys.maxint
			nearest_i = -1
			x = np.array(testing_feature_vector[c][img])
			for i in range(len(class_means)):
				temp = x - np.array(class_means[i])
				#squared Mahalanobis distance under the pooled covariance
				dist = np.dot(np.dot(temp, inv_avg_covariance), temp.T)
				if dist < nearest:
					nearest = dist
					nearest_i = i
			if nearest_i != c:
				error_count += 1
			errors[c][nearest_i] += 1
			classifiedAs[c][img] = nearest_i

	if does_print_error == True:
		print_errors(title, error_count, errors)
	return classifiedAs

# Evaluate Method 5 on the training set and every test set.
for _title, _dataset in [
	("Method 5 - Moment space; using combination of central moments and moments; Identical Covariance Matrices (trained on A, tested on A)", training_data),
	("Method 5 - Moment space; using combination of central moments and moments; Identical Covariance Matrices (trained on A, tested on B)", testing_data_B),
	("Method 5 - Moment space; using combination of central moments and moments; Identical Covariance Matrices (trained on A, tested on C)", testing_data_C),
	("Method 5 - Moment space; using combination of central moments and moments; Identical Covariance Matrices (trained on A, tested on D)", testing_data_D)]:
	classifier_5_improved(_title, _dataset)

def classifier_6(title, testing_data, does_print_error = True):
	"Method 6: pixel space; assuming identity matrix; so classify samples to the class whose class mean is nearest to the sample"
	# Training: one 256-element binary pixel vector per image, then class means.
	mask = init_pixel_feature_pattern("ALL_PIXELS")
	train_features = extract_features(training_data, calculate_pixel_features, mask)[0]
	class_means = cal_class_means(train_features)

	# Testing: assign each image to the nearest class mean (Euclidean).
	error_count = 0
	errors = [row[:] for row in [[0] * 10] * 10]
	classifiedAs = [row[:] for row in [[-1] * 100] * 10]

	test_features = extract_features(testing_data, calculate_pixel_features, mask)[0]

	for c in range(len(test_features)):
		for img in range(len(test_features[c])):
			sample = np.array(test_features[c][img])
			best_dist = sys.maxint
			best_class = -1
			for i in range(len(class_means)):
				d = np.linalg.norm(sample - np.array(class_means[i]))
				if d < best_dist:
					best_dist = d
					best_class = i
			if best_class != c:
				error_count += 1
			errors[c][best_class] += 1
			classifiedAs[c][img] = best_class

	if does_print_error == True:
		print_errors(title, error_count, errors)
	return classifiedAs

#classifier_6("Method 6 - 256 Pixels, Identity Covariance Matrices (trained on A, tested on A)", training_data)
#classifier_6("Method 6 - 256 Pixels, Identity Covariance Matrices (trained on A, tested on B)", testing_data_B)
#classifier_6("Method 6 - 256 Pixels, Identity Covariance Matrices (trained on A, tested on C)", testing_data_C)
#classifier_6("Method 6 - 256 Pixels, Identity Covariance Matrices (trained on A, tested on D)", testing_data_D)

def classifier_7(title, testing_data, does_print_error =  True):
	"Method 7: pixel space; assuming class-conditionally independent features"
	# Training: per-class Bernoulli parameters p_ij = P(pixel i is ink | class j).
	mask = init_pixel_feature_pattern("ALL_PIXELS")
	train_features = extract_features(training_data, calculate_pixel_features, mask)[0]
	feature_freq = cal_class_each_feature_frequency(train_features)

	# Testing: pick the class maximizing the naive-Bayes log-likelihood.
	error_count = 0
	errors = [row[:] for row in [[0] * 10] * 10]
	classifiedAs = [row[:] for row in [[-1] * 100] * 10]

	test_features = extract_features(testing_data, calculate_pixel_features, mask)[0]

	for c in range(len(test_features)):
		for img in range(len(test_features[c])):
			sample = test_features[c][img]
			best_score = -sys.maxint-1
			best_class = -1
			for j in range(len(feature_freq)):
				# g_j(x) = sum_i x_i*log(p_ij) + (1 - x_i)*log(1 - p_ij)
				score = 0
				for i in range(len(feature_freq[j])):
					score += sample[i]*math.log(feature_freq[j][i]) + \
						(1-sample[i])*math.log(1-feature_freq[j][i])
				if score > best_score:
					best_score = score
					best_class = j
			if best_class != c:
				error_count += 1
			errors[c][best_class] += 1
			classifiedAs[c][img] = best_class

	if does_print_error == True:
		print_errors(title, error_count, errors)
	return classifiedAs

#classifier_7("Method 7 - 256 Pixels, Class-conditionally independent features (trained on A, tested on A)", training_data)
#classifier_7("Method 7 - 256 Pixels, Class-conditionally independent features (trained on A, tested on B)", testing_data_B)
#classifier_7("Method 7 - 256 Pixels, Class-conditionally independent features (trained on A, tested on C)", testing_data_C)
#classifier_7("Method 7 - 256 Pixels, Class-conditionally independent features (trained on A, tested on D)", testing_data_D)


def classifier_8_improved(title, testing_data, does_print_error = True):
	"""Improved Method 8: 1-NN in pixel space.

	NOTE: the accumulated distance is a[i] ^ b[i] over 0/1 pixel features,
	i.e. the Hamming distance — on binary vectors this equals the squared
	L2 distance, so the nearest neighbour matches the L2 metric. Distance
	ties are broken uniformly at random. Returns a 10x100 matrix of
	predicted class indices.
	"""
	##training
	pattern = init_pixel_feature_pattern("ALL_PIXELS")
	training_feature_vector = extract_features(training_data, calculate_pixel_features, pattern)[0]
	
	##testing
	error_count = 0
	errors = [x[:] for x in [[0]*10]*10]
	classifiedAs = [x[:] for x in [[-1]*100]*10]
	testing_feature_vector = extract_features(testing_data, calculate_pixel_features, pattern)[0]
	for c in range(len(testing_feature_vector)):
		for img in range(len(testing_feature_vector[c])):
			nearest = sys.maxint
			nearest_i = []	# classes of every training image tied at the minimum
			for ct in range(len(training_feature_vector)):
				for imgt in range(len(training_feature_vector[ct])):
					dist = 0
					a = testing_feature_vector[c][img]
					b = training_feature_vector[ct][imgt]
					# Hamming distance with early exit once it exceeds the best
					for i in range(len(training_feature_vector[ct][imgt])):
						dist += a[i] ^ b[i]
						if dist > nearest:
							break

					if dist > nearest:
						continue
					if dist < nearest:
						nearest = dist
						nearest_i = [ct]	# strictly better: restart tie list
					else:
						nearest_i.append(ct)	# exact tie: remember class too

			# break ties uniformly at random among the tied classes
			decision_class = nearest_i[random.randint(0, len(nearest_i)-1)]
			if decision_class != c:
				error_count += 1
			errors[c][decision_class] += 1
			classifiedAs[c][img] = decision_class

	if does_print_error == True:
		print_errors(title, error_count, errors)
	return classifiedAs
		
#classifier_8_improved("Improved Method 8: 1-NN in pixel space under L2 metric (trained on A, tested on A)", training_data)
#classifier_8_improved("Improved Method 8: 1-NN in pixel space under L2 metric (trained on A, tested on B)", testing_data_B)
#classifier_8_improved("Improved Method 8: 1-NN in pixel space under L2 metric (trained on A, tested on C)", testing_data_C)
#classifier_8_improved("Improved Method 8: 1-NN in pixel space under L2 metric (trained on A, tested on D)", testing_data_D)

def classifier_9_improved(title, testing_data, does_print_error = True):
	"""Improved Method 9: 5-NN in pixel space.

	Distances are Hamming (a[i] ^ b[i] over 0/1 pixels), which equals
	squared L2 on binary vectors. Keeps the five nearest training images
	and majority-votes their classes; insertion-order ties and vote ties
	are both broken at random. Returns a 10x100 prediction matrix.
	"""
	##training
	pattern = init_pixel_feature_pattern("ALL_PIXELS")
	training_feature_vector = extract_features(training_data, calculate_pixel_features, pattern)[0]
	
	##testing
	error_count = 0
	errors = [x[:] for x in [[0]*10]*10]
	classifiedAs = [x[:] for x in [[-1]*100]*10]
	testing_feature_vector = extract_features(testing_data, calculate_pixel_features, pattern)[0]
	for c in range(len(testing_feature_vector)):
		for img in range(len(testing_feature_vector[c])):
			# nearests[0..4] hold the 5 best distances in ascending order;
			# slot 5 is scratch space for the candidate under consideration
			nearests = [sys.maxint]*6
			nearest_is = [-1]*6
			for ct in range(len(training_feature_vector)):
				for imgt in range(len(training_feature_vector[ct])):
					nearests[5] = 0
					a = testing_feature_vector[c][img]
					b = training_feature_vector[ct][imgt]
					# Hamming distance, bailing out early once it exceeds
					# the current 5th-best distance
					for i in range(len(training_feature_vector[ct][imgt])):
						nearests[5] += a[i] ^ b[i]
						if nearests[5] > nearests[4]:
							break
					if nearests[5] > nearests[4]:
						continue

					# insertion-sort the candidate into the top-5 list;
					# equal distances are swapped with probability 0.5
					nearest_is[5] = ct
					i = 5
					while i >= 1:
						if nearests[i] < nearests[i-1] or nearests[i] == nearests[i-1] and random.random() > 0.5:
							swap(nearests, i, i-1)
							swap(nearest_is, i, i-1)
							i -= 1
						else:
							break
			# majority vote over the classes of the 5 nearest neighbours
			classes = [0]*10
			for r in range(len(nearest_is)-1):
				classes[nearest_is[r]] += 1
			
			m = 0
			mc = []	# all classes sharing the highest vote count
			for r in range(len(nearest_is)-1):
				if classes[nearest_is[r]] > m:
					m = classes[nearest_is[r]]
					mc = [nearest_is[r]]
				elif classes[nearest_is[r]] == m:
					mc.append(nearest_is[r])
			
			#randomly select one class as the decision
			decision_class = mc[random.randint(0, len(mc)-1)]
			if decision_class != c:
				error_count += 1
			errors[c][decision_class] += 1
			classifiedAs[c][img] = decision_class

	if does_print_error == True:
		print_errors(title, error_count, errors)
	return classifiedAs

#classifier_9_improved("Improved Method 9: 5-NN in pixel space under L2 metric (trained on A, tested on A)", training_data)
#classifier_9_improved("Improved Method 9: 5-NN in pixel space under L2 metric (trained on A, tested on B)", testing_data_B)
#classifier_9_improved("Improved Method 9: 5-NN in pixel space under L2 metric (trained on A, tested on C)", testing_data_C)
#classifier_9_improved("Improved Method 9: 5-NN in pixel space under L2 metric (trained on A, tested on D)", testing_data_D)


def classifier_10(title, testing_data, does_print_error =  True):
	"Method 10 - random pixel patterns, same classifier run for X times"
	# Run the base classifier classifierAmount times and majority-vote
	# the per-image predictions across runs.
	error_count = 0
	errors = [row[:] for row in [[0] * 10] * 10]
	classifiedAs = [row[:] for row in [[-1] * 100] * 10]
	classifierAmount = 1
	clasAs_all = [classifier_2("", testing_data, False) for _run in range(classifierAmount)]

	for c in range(10):
		for img in range(100):
			votes = [0] * 10
			for run in range(classifierAmount):
				votes[clasAs_all[run][c][img]] += 1
			# First class reaching the highest vote count wins (same tie
			# rule as a left-to-right linear scan with strict >).
			winner = votes.index(max(votes))

			if winner != c:
				error_count += 1
			errors[c][winner] += 1
			classifiedAs[c][img] = winner

	if does_print_error == True:
		print_errors(title, error_count, errors)
	return classifiedAs

#classifier_10("Method 10 - random pixel patterns, same classifier run for X times (trained on A, tested on A)", training_data)
#classifier_10("Method 10 - random pixel patterns, same classifier run for X times (trained on A, tested on B)", testing_data_B)
#classifier_10("Method 10 - random pixel patterns, same classifier run for X times (trained on A, tested on C)", testing_data_C)
#classifier_10("Method 10 - random pixel patterns, same classifier run for X times (trained on A, tested on D)", testing_data_D)


def classifier_11_training(training_data):
	"SVM 256 pixel features"
	# Flatten the per-class feature vectors into one sample list with
	# parallel integer class labels 0..9.
	mask = init_pixel_feature_pattern("ALL_PIXELS")
	train_features = extract_features(training_data, calculate_pixel_features, mask)[0]

	samples = []
	labels = []
	for class_index, class_vectors in enumerate(train_features):
		for vec in class_vectors:
			samples.append(vec)
			labels.append(class_index)

	# Linear-kernel SVC; the remaining parameters spell out library defaults
	# (coef0/degree/gamma are irrelevant for a linear kernel).
	clf = svm.SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.50, degree=5, gamma=0.0, kernel='linear', max_iter=-1,
	probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False)
	clf.fit(samples, labels)

	return clf

def classifier_11_testing(title, testing_data, model, does_print_error = True):
	"SVM 256 pixel features"
	# Extract pixel features, predict all 1000 test images in one batch,
	# then tally the confusion matrix from the flat prediction array.
	error_count = 0
	errors = [row[:] for row in [[0] * 10] * 10]
	classifiedAs = [row[:] for row in [[-1] * 100] * 10]
	mask = init_pixel_feature_pattern("ALL_PIXELS")
	test_features = extract_features(testing_data, calculate_pixel_features, mask)[0]

	flat_samples = []
	for c in range(10):
		for img in range(100):
			flat_samples.append(test_features[c][img])

	predictions = model.predict(flat_samples)

	for c in range(10):
		for img in range(100):
			predicted = predictions[c * 100 + img]
			if predicted != c:
				error_count += 1
			errors[c][predicted] += 1
			classifiedAs[c][img] = predicted

	if does_print_error == True:
		print_errors(title, error_count, errors)
	return classifiedAs

# Train the SVM once, then evaluate it on the training set and every test set.
model = classifier_11_training(training_data)
for _title, _dataset in [
	("Method 11: SVM 256 pixel features (trained on A, tested on A)", training_data),
	("Method 11: SVM 256 pixel features (trained on A, tested on B)", testing_data_B),
	("Method 11: SVM 256 pixel features (trained on A, tested on C)", testing_data_C),
	("Method 11: SVM 256 pixel features (trained on A, tested on D)", testing_data_D)]:
	classifier_11_testing(_title, _dataset, model)
