# -*- coding:utf-8 -*-
import numpy as np
import random
import time

from sklearn.neighbors import KNeighborsClassifier 
# Feature-extraction method + KNN classifier

from preprocess_data import get_data

class improve_lvw(object):
	"""Improved Las Vegas Wrapper (LVW) feature selection scored by a KNN classifier.

	Improvement over plain LVW: instead of drawing every candidate feature
	subset independently, each iteration keeps the top ``excellent_size``
	subsets and breeds the next population from their pooled features —
	a light genetic-algorithm flavor.

	Args:
		num_feature: number of feature indices per candidate subset.
		population_size: candidate subsets evaluated per iteration.
		excellent_size: best subsets kept as parents for the next iteration.
		K: number of outer iterations.
		valid_num: trailing training samples held out for validation scoring.
		n_neighbors: ``k`` for the KNN classifier used to score subsets.
	"""

	def __init__(self, num_feature=40, population_size=100, excellent_size=10,
				 K=20, valid_num=1000, n_neighbors=5):
		super().__init__()
		features, labels = get_data('训练集')            # training set
		test_features, test_labels = get_data('测试集')  # test set

		# Standardize with training-set statistics only (NumPy broadcasting);
		# the epsilon guards against zero-variance features.
		features_mean = features.mean(axis=0, keepdims=True)
		features_std = features.std(axis=0, keepdims=True)
		features = (features - features_mean) / (features_std + 1e-8)
		test_features = (test_features - features_mean) / (features_std + 1e-8)

		# Hold out the last valid_num samples for subset scoring.
		train_features = features[:-valid_num]
		train_labels = labels[:-valid_num]
		valid_features = features[-valid_num:]
		valid_labels = labels[-valid_num:]

		best_features = None

		# Derive the feature dimensionality from the data instead of
		# hard-coding it (the original dataset has 440 features).
		all_idx = list(range(features.shape[1]))
		population = [random.sample(all_idx, num_feature)
					  for _ in range(population_size)]

		for k in range(K):  # one outer iteration per loop
			# Step 1: score every individual on the validation split and keep
			# the excellent_size best.  min_score / min_idx track the worst
			# entry currently kept so a better newcomer can evict it.
			excellent_list = []
			min_score = 0
			min_idx = -1
			t = time.time()
			for k2, p in enumerate(population):
				clf = KNeighborsClassifier(n_neighbors=n_neighbors)
				clf.fit(train_features.take(p, axis=1), train_labels)  # selected features only
				score = clf.score(valid_features.take(p, axis=1), valid_labels)
				if len(excellent_list) < excellent_size or score > min_score:
					if len(excellent_list) == excellent_size:
						del excellent_list[min_idx]
					excellent_list.append({'score': score, 'features': p})
					# Recompute the worst kept entry; scores are accuracies,
					# so 1 is a safe upper bound to start the search from.
					min_score = 1
					for i, excellent in enumerate(excellent_list):
						if excellent['score'] < min_score:
							min_score = excellent['score']
							min_idx = i
				if (k2 + 1) % 10 == 0:
					print(k2 + 1, min_score)
			end_t = time.time()

			# Step 2: rebuild the population from the survivors, best first.
			excellent_list = sorted(excellent_list, key=lambda x: x['score'], reverse=True)
			population = [excellent['features'] for excellent in excellent_list]
			best_features = population[0].copy()

			# Report the current best subset's accuracy on the held-out test set.
			clf = KNeighborsClassifier(n_neighbors=n_neighbors)
			clf.fit(train_features.take(best_features, axis=1), train_labels)
			test_time = time.time()
			score = clf.score(test_features.take(best_features, axis=1), test_labels)

			print('[epoch]:%d,score:%.2f' % (k, excellent_list[0]['score']))
			print('[time] %.2f' % (end_t - t))
			print('[test ] score:%.2f,test_time:%.2f' % (score, time.time() - test_time))

			# Candidate pool: all features of the survivors, with duplicates,
			# so features shared by several survivors are drawn more often.
			candidate = [f for excellent in excellent_list for f in excellent['features']]
			unique_pool = set(candidate)
			for _ in range(population_size - excellent_size):
				# Sample from the multiset, deduplicate, then top up with
				# random unused candidates so every individual keeps exactly
				# num_feature features.  (The original deduplicated without
				# topping up, silently shrinking subsets below num_feature.)
				chosen = set(random.sample(candidate, num_feature))
				need = min(num_feature, len(unique_pool)) - len(chosen)
				if need > 0:
					chosen.update(random.sample(list(unique_pool - chosen), need))
				population.append(list(chosen))
		self.feature = best_features

	def get_feature(self, n_feature=440):
		"""Return up to the first ``n_feature`` indices of the selected subset."""
		n_feature = min(n_feature, len(self.feature))
		return self.feature[:n_feature]

