from scipy.io import loadmat, savemat
from random import random
import numpy as np
import math
from hmmlearn.hmm import GaussianHMM
from sklearn import svm
import warnings

import config
from feature import load_features


warnings.filterwarnings("ignore")


def load_data_fake(partial = 0.5):
	"""Generate a fake training set filled with uniform random values.

	Args:
		partial: fraction of config.NUM_TRAIN_SAMPLE to generate.

	Returns:
		ndarray of shape (sample_num, NUM_SEGMENT_PER_SAMPLE, NUM_FEATURE)
		with every entry drawn from random() in [0, 1).
	"""
	sample_num = int(partial * config.NUM_TRAIN_SAMPLE)
	samples = [
		[
			[random() for _ in range(config.NUM_FEATURE)]
			for _ in range(config.NUM_SEGMENT_PER_SAMPLE)
		]
		for _ in range(sample_num)
	]
	return np.array(samples)

def svm_train_window_partition(dt, k = 5):
	"""Slide a length-k window across each sample and flatten each window.

	Every run of k consecutive segment feature vectors becomes one
	flattened 1-D row, suitable as direct SVM input.

	Args:
		dt: ndarray of shape (num_sample, num_segment, num_feature).
		k: window length in segments.

	Returns:
		ndarray of shape (num_sample * (num_segment - k + 1), k * num_feature).
	"""
	_, num_segment, _ = dt.shape
	windows = [
		sample[start:start + k].flatten()
		for sample in dt
		for start in range(num_segment - k + 1)
	]
	return np.array(windows)

def svm_test_window_partition(dt, k = config.WINDOW_K):
	"""Same sliding-window flattening as svm_train_window_partition,
	but defaulting to the configured window size for test data.

	Args:
		dt: ndarray of shape (num_sample, num_segment, num_feature).
		k: window length in segments (defaults to config.WINDOW_K).

	Returns:
		ndarray of shape (num_sample * (num_segment - k + 1), k * num_feature).
	"""
	_, num_segment, _ = dt.shape
	rows = []
	for sample in dt:
		rows.extend(
			sample[start:start + k].flatten()
			for start in range(num_segment - k + 1)
		)
	return np.array(rows)

def train_window_partition(dt, k = 5):
	"""Slide a length-k window across each sample, keeping windows 2-D.

	Unlike the svm_* variants the windows are NOT flattened, since the
	HMM consumes each window as a length-k sequence of feature vectors.

	Args:
		dt: ndarray of shape (num_sample, num_segment, num_feature).
		k: window length in segments.

	Returns:
		ndarray of shape (num_sample * (num_segment - k + 1), k, num_feature).
	"""
	_, num_segment, _ = dt.shape
	return np.array([
		sample[start:start + k]
		for sample in dt
		for start in range(num_segment - k + 1)
	])

def test_window_partition(dt, k = config.WINDOW_K):
	"""Sliding-window partition for test data, windows kept 2-D.

	Identical to train_window_partition but defaulting to the
	configured window size config.WINDOW_K.

	Args:
		dt: ndarray of shape (num_sample, num_segment, num_feature).
		k: window length in segments.

	Returns:
		ndarray of shape (num_sample * (num_segment - k + 1), k, num_feature).
	"""
	_, num_segment, _ = dt.shape
	windows = []
	for sample in dt:
		for start in range(num_segment - k + 1):
			windows.append(sample[start:start + k])
	return np.array(windows)


def train_hmm(data, num_state, k = 5):
	"""Fit a diagonal-covariance Gaussian HMM on stacked window data.

	Bug fix: the model was previously fitted twice (once via the call
	chained onto the constructor, then again explicitly), doubling the
	EM training cost for no benefit. It is now fitted exactly once.

	Args:
		data: ndarray of shape (num_windows, k, num_feature).
		num_state: number of hidden HMM states.
		k: segments per window, i.e. the length of each sequence.

	Returns:
		The fitted GaussianHMM model.
	"""
	window_num = data.shape[0]
	# hmmlearn expects one 2-D array of all observations plus the
	# per-sequence lengths; every window is one length-k sequence.
	dt = np.concatenate(data)
	dt_len = [k] * window_num
	model = GaussianHMM(n_components=num_state, covariance_type="diag")
	model.fit(dt, dt_len)
	return model


def validate(model1, model2, window_data):
	"""Build a per-window classification feature vector from two HMMs.

	The vector is model1's predicted state sequence followed by its
	log-likelihood score, then the same pair from model2.

	Args:
		model1, model2: fitted HMM models exposing predict() and score().
		window_data: one window of observations, passed to both models.

	Returns:
		Flat Python list: states1 + [score1] + states2 + [score2].
	"""
	class_feature = list(model1.predict(window_data))
	class_feature.append(model1.score(window_data))
	class_feature.extend(model2.predict(window_data))
	class_feature.append(model2.score(window_data))

	print(class_feature)

	return class_feature


def calc_class_features(model1, model2, data):
	"""Compute the HMM-derived feature vector for every window in data.

	Args:
		model1, model2: fitted HMM models, forwarded to validate().
		data: array whose first axis indexes windows.

	Returns:
		2-D ndarray with one row of class features per window.
	"""
	return np.array([validate(model1, model2, window) for window in data])


def concat_features(normal_features, water_features):
	"""Stack normal (label 0) and water (label 1) feature rows.

	Args:
		normal_features: feature rows of the "normal" (air) class.
		water_features: feature rows of the "water" class.

	Returns:
		(features, labels): the row-stacked feature matrix and the
		matching 0/1 label vector.
	"""
	num_normal = normal_features.shape[0]
	num_water = water_features.shape[0]
	features = np.concatenate([normal_features, water_features])
	labels = np.concatenate([[0] * num_normal, [1] * num_water])
	return features, labels

def train():
	"""Train the full pipeline: one HMM per class plus a linear SVM.

	Loads training features for both classes, partitions them into
	sliding windows, fits a GaussianHMM per class, converts every
	window into an HMM-derived feature vector (state paths plus
	log-likelihoods), and trains a linear SVM on those vectors.

	Fixes:
	- The log line claimed a window size of config.WINDOW_K while the
	  partition and HMM steps silently used their hard-coded default
	  k=5 (mismatching test_window_partition's config.WINDOW_K
	  default); the configured value is now passed through explicitly.
	- The final metric is accuracy, not precision; the message and
	  computation reflect that, using np.mean instead of a manual loop.

	Returns:
		(normal_model, water_model, classifier)
	"""
	k = config.WINDOW_K
	normal_data = train_window_partition(load_features(config.TRAIN_AIR_DIR)[0], k)
	water_data = train_window_partition(load_features(config.TRAIN_WATER_DIR)[0], k)

	print("Load train data ok!")
	print("\tPartition with window size = {}, now data size = {}".format(k, water_data.shape))

	normal_model = train_hmm(normal_data, config.HMM_NORMAL_STATE, k)
	water_model = train_hmm(water_data, config.HMM_WATER_STATE, k)

	print("HMM model train ok!")

	normal_features = calc_class_features(normal_model, water_model, normal_data)
	water_features = calc_class_features(normal_model, water_model, water_data)
	features, labels = concat_features(normal_features, water_features)

	print("Prepare for SVM classifier train...")
	print("\tFeature size = {}".format(features.shape))

	classifier = svm.SVC(kernel='linear')
	classifier.fit(features, labels)

	print("SVM classifier train ok!")

	predict_labels = classifier.predict(features)
	print(predict_labels)
	print(labels)

	# Training-set accuracy: fraction of windows classified correctly.
	accuracy = float(np.mean(predict_labels == labels))
	print("Accuracy on training set = {}".format(accuracy))

	return normal_model, water_model, classifier

def simpleSVMtrain():
	"""Train a linear SVM directly on flattened window features.

	A baseline that skips the HMM stage entirely: raw windowed
	features from both classes are stacked and fed straight to an SVC.

	Returns:
		The fitted svm.SVC classifier.
	"""
	air_windows = svm_train_window_partition(load_features(config.TRAIN_AIR_DIR)[0])
	water_windows = svm_train_window_partition(load_features(config.TRAIN_WATER_DIR)[0])
	features, labels = concat_features(air_windows, water_windows)
	model = svm.SVC(kernel='linear')
	model.fit(features, labels)
	print(features.shape)
	return model


if __name__ == '__main__':
	# Script entry point: run the full HMM + SVM training pipeline.
	train()