# -*- coding: utf-8 -*-
import functools
import itertools
import math
import time
from collections import Counter
# from tkinter import _flatten  # flattens a 2-D list into a 1-D tuple

import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

# Names of the UCR time-series classification datasets.  loadDatasets()
# selects one by index; the trailing #N comments give that index.
datasets = [
	"Two_Patterns",						#0
	"ChlorineConcentration",			#1
	"wafer",							#2
	"MedicalImages",					#3
	"FaceAll",							#4
	"OSULeaf",							#5
	"Adiac",							#6
	"SwedishLeaf",						#7
	"yoga",								#8
	"fish",								#9
	"Lighting7",						#10
	"Lighting2",						#11
	"Trace",							#12
	"synthetic_control",				#13
	"FacesUCR",							#14
	"CinC_ECG_torso",					#15
	"MALLAT",							#16
	"Symbols",							#17
	"Coffee",							#18
	"ECG200",							#19
	"ECG5000",							#20
	"FaceFour",							#21
	"OliveOil",							#22
	"Gun_Point",						#23
	"Beef",								#24
	"DiatomSizeReduction",				#25
	"CBF",								#26
	"ECGFiveDays",						#27
	"TwoLeadECG",						#28
	"SonyAIBORobotSurfaceII",			#29
	"MoteStrain",						#30
	"ItalyPowerDemand",					#31
	"SonyAIBORobotSurface",				#32
	"Haptics",							#33
	"InlineSkate",						#34
	"50words",							#35
	"Cricket_Y",						#36
	"Cricket_X",						#37
	"Cricket_Z",						#38
	"WordsSynonyms",					#39
	"uWaveGestureLibrary_Z",			#40
	"uWaveGestureLibrary_Y",			#41
	"uWaveGestureLibrary_X",			#42
	"NonInvasiveFatalECG_Thorax1",		#43
	"NonInvasiveFatalECG_Thorax2",		#44
	"StarLightCurves",					#45
]

def recordTime(fn):
	'''
	Desc: Decorator that prints the wall-clock runtime of each call to fn.
	'''
	@functools.wraps(fn)
	def wrapper(*args, **kwargs):
		begin = time.time()
		result = fn(*args, **kwargs)
		elapsed = time.time() - begin
		print('%s executed in %s s' % (fn.__name__, elapsed))
		return result
	return wrapper

def loadDatasets(dataSetNum):
	'''
	Desc: Load the dataSetNum-th (0-based) dataset and return its
	train/test inputs and label columns.

	Each split is read from './<name>/<name>_TRAIN' or '_TEST': a
	headerless CSV whose first column is the class label and whose
	remaining columns are the time-series values.

	Returns: (trainInput, trainLabels, testInput, testLabels)
	'''
	def _loadSplit(suffix):
		# One helper for both splits removes the copy-pasted loading code.
		fileName = './' + datasets[dataSetNum] + '/' + datasets[dataSetNum] + suffix
		dfData = pd.read_csv(fileName, header = None)
		labels = dfData.iloc[:, 0]       # first column = class label
		inputData = dfData.iloc[:, 1:]   # remaining columns = series values
		return inputData, labels

	trainInput, trainLabels = _loadSplit('_TRAIN')
	print("trainLabels's size : ", trainLabels.shape)
	print("trainInput size : ", trainInput.shape)

	testInput, testLabels = _loadSplit('_TEST')
	print("testLabels's size : ", testLabels.shape)
	print("testInput size : ", testInput.shape)

	return trainInput, trainLabels, testInput, testLabels

def zNorm(dfData):
	'''
	Desc: Z-normalize every row of a DataFrame (or 2-D array-like) and
	return the result as a float np.ndarray.

	Each row becomes (x - mean) / std using the row's own mean and
	population std (ddof=0), as required before SAX discretization.
	Rows with zero std produce inf/nan, same as the original code.
	'''
	# Cast to float up front: the original in-place row assignment would
	# silently truncate normalized values for integer-dtype input.
	mat = np.asarray(dfData, dtype = float)
	# Vectorized row-wise z-norm instead of a per-element Python lambda loop.
	means = mat.mean(axis = 1, keepdims = True)
	stds = mat.std(axis = 1, keepdims = True)
	return (mat - means) / stds

def combination(alphaNum, repeatNum):
	'''
	Desc: Build every word of length repeatNum over the first alphaNum
	letters of the alphabet, in lexicographic order.
	E.g. combination(4, 4) -> ['aaaa', 'aaab', ..., 'dddd'].
	'''
	letters = "abcdefghijklmnopqrstuvwxyz"[:alphaNum]
	return [''.join(chars) for chars in itertools.product(letters, repeat = repeatNum)]

@recordTime
def slideWindow(inputData, windowSize, wordSize):
	'''
	Desc: Slide a window over each series, PAA-discretize every window
	into a SAX word, and collapse consecutive duplicate words
	(numerosity reduction).

	inputData: 2-D array of z-normalized series (one series per row).
	windowSize: length of the sliding window.
	wordSize: letters per word, i.e. PAA frames per window.
	Returns: list (one entry per series) of lists of words.
	'''
	def valueToAlphabet(x):
		# 4-symbol SAX breakpoints for the standard normal distribution.
		if x <= -0.67:
			return 'a'
		if x > -0.67 and x <= 0:
			return 'b'
		if x > 0 and x <= 0.67:
			return 'c'
		if x > 0.67:
			return 'd'

	paaDataMat = []
	colLength = inputData.shape[1]
	frameLength = windowSize / wordSize   # may be fractional
	for aRow in inputData:
		wordList = []
		for i in range(0, colLength - windowSize + 1):
			window = aRow[i:i + windowSize]   # sliding window
			word = ''
			frameSum = 0
			currentFrameSize = 0
			remaining = 0
			for j in range(0, windowSize):   # PAA inside the window
				# When frameLength is fractional a sample is split across
				# the frame border: `remaining` of it finishes the current
				# frame, the rest seeds the next one.
				remaining = frameLength - currentFrameSize
				if remaining > 1:
					frameSum += window[j]
					currentFrameSize += 1
				else:
					frameSum += remaining * window[j]
					currentFrameSize += remaining
				# NOTE(review): float equality — relies on the fractional
				# parts summing back to exactly frameLength; holds for the
				# window/word sizes used here but is fragile in general.
				if currentFrameSize == frameLength:
					word += valueToAlphabet(frameSum / frameLength)
					frameSum = (1 - remaining) * window[j]
					currentFrameSize = 1 - remaining
			wordList.append(word)
		# Numerosity reduction: keep a word only when it differs from its
		# predecessor.  Fix: the original started from index 1 with an
		# empty result list, so the first word of every series was lost.
		reducedWordList = wordList[:1]
		for k in range(1, len(wordList)):
			if wordList[k] != wordList[k - 1]:
				reducedWordList.append(wordList[k])
		paaDataMat.append(reducedWordList)
	return paaDataMat

def wordCount(wordData):
	'''
	Desc: Count the occurrences of each distinct word in wordData.

	Returns: dict mapping word -> occurrence count.
	'''
	# Counter does a single O(n) pass instead of an O(n) list.count()
	# per unique word; the unused wordMax variable was dropped.
	return dict(Counter(wordData))

@recordTime
def makeFeature(paaData):
	'''
	Desc: Turn the per-series word lists from slideWindow into a
	bag-of-words feature matrix.

	Columns are all words of the observed length over the 4-letter SAX
	alphabet; each cell is how often that word occurs in the series.
	'''
	wordLength = len(paaData[0][0])
	wordList = combination(4, wordLength)  # feature columns
	featureMat = []
	for aRow in paaData:
		# One Counter pass per series instead of an O(len(aRow))
		# list.count() call for every feature column.
		counts = Counter(aRow)
		featureMat.append([counts[w] for w in wordList])

	return featureMat

def dealResult(predRes):
	'''
	Desc: Convert per-class probability rows into predicted class
	indices: argmax per row, first maximum wins on ties (identical to
	the original list.index(max(...)) behavior).
	'''
	return [int(np.argmax(aRow)) for aRow in predRes]

@recordTime
def trainAndPredict(windowSize, wordSize, trainInput, trainLabels):
	'''
	Desc: For one sliding-window configuration, build bag-of-words
	features, train a LightGBM multiclass model with early stopping on a
	20% holdout, then retrain on the full training set with the best
	iteration count.

	Returns: (holdout multi_error at the best iteration, retrained model)
	'''
	trainInput = slideWindow(trainInput, windowSize, wordSize)
	trainInput = makeFeature(trainInput)
	
	numClass = len(set(trainLabels)) # number of classes
	params = {
		'application':'multiclass',
		'num_class':numClass,
		'boosting_type': 'gbdt',
    	'metric':'multi_error',
    	'seed':0,
    	'num_leaves': 30,
    	'learning_rate': 0.05,
		'verbose':0,
    }

	trainInput = np.array(trainInput)
	trainLabels = np.array(trainLabels)
	xTrain, xTest, yTrain, yTest = train_test_split(trainInput, trainLabels, test_size = 0.2, random_state = 100)
	lgbTrain = lgb.Dataset(xTrain, yTrain)
	lgbEval = lgb.Dataset(xTest, yTest)
	lgbAll = lgb.Dataset(trainInput, trainLabels)
	numRound = 10000
	evalError = {}  # collects the per-iteration metric values for the eval set
	# NOTE(review): the evals_result/early_stopping_rounds keyword style
	# presumably targets LightGBM < 4 (later versions use callbacks) — verify
	# against the pinned lightgbm version.
	modelTrain = lgb.train(
		params,
		lgbTrain,
		numRound,
		valid_sets = lgbEval, 
		valid_names = 'getErrorRate',
		evals_result = evalError,
		early_stopping_rounds = 15)

	trainErrorRate = evalError.get('getErrorRate').get('multi_error')[modelTrain.best_iteration - 1]
    # Retrain on the whole training set, using the best iteration count
    # found via the held-out split above.
	model = lgb.train(params, lgbAll, modelTrain.best_iteration)

	return trainErrorRate, model

def testPredict(model, windowSize, wordSize, testInput, testLabels):
	'''
	Desc: Featurize the test set with the given window configuration,
	predict with the trained model, and print/return the error rate.

	Returns: fraction of test rows whose predicted class differs from
	the true label.
	'''
	testInput = slideWindow(testInput, windowSize, wordSize)
	testInput = makeFeature(testInput)
	preds = model.predict(testInput)
	res = dealResult(preds)
	# Count mismatches with zip instead of the original manual index loop.
	errorCnt = sum(1 for expected, got in zip(testLabels, res) if expected != got)
	testErrorRate = errorCnt / len(res)
	print('testErrorRate : ', testErrorRate)
	return testErrorRate


def testOneDataSet(dataSetNum):
	'''
	Desc: For dataset dataSetNum, z-normalize the data, grid-search the
	sliding-window/word-size combinations on the training set, evaluate
	the best configuration on the test set, and write a CSV log of every
	result.
	'''
	trainInput, trainLabels, testInput, testLabels = loadDatasets(dataSetNum)	
	trainInput = zNorm(trainInput)
	testInput = zNorm(testInput)

	# LightGBM multiclass needs 0-based integer class labels.
	lE = preprocessing.LabelEncoder()
	trainLabels = lE.fit(trainLabels).transform(trainLabels)
	testLabels = lE.transform(testLabels)

	colLengh = len(trainInput[0])
	end = int(math.log(colLengh / 2 , 2))
	# Candidate window sizes: powers of two from 2^4 up to roughly half
	# the series length, combined with each word size below.
	windowSizeList = list(map(int, np.logspace(4, end, end - 3,base=2)))
	# windowSizeList = [48, 52]
	wordSizeList = [3, 4]
	logRecord = []
	optimalModel = None
	# Track the best (lowest) train error seen so far and its parameters.
	tmpErrorRate, optimalWindowSize, optimalWordSize = 1, None, None
	for windowSize in windowSizeList:
		for wordSize in wordSizeList:
			trainErrorRate, model = trainAndPredict(windowSize,wordSize,trainInput, trainLabels)
			logRecord.append([windowSize, wordSize, trainErrorRate])
			if trainErrorRate < tmpErrorRate:
				tmpErrorRate, optimalWindowSize, optimalWordSize = trainErrorRate, windowSize, wordSize
				optimalModel = model

	logRecord.append(['The optimal parameters are selected as follows:'])
	logRecord.append(['windowSize', 'wordSize', 'testErrorRate'])
	testErrorRate = testPredict(optimalModel, optimalWindowSize, optimalWordSize, testInput, testLabels)
	logRecord.append([optimalWindowSize, optimalWordSize, testErrorRate])
	# Dump every configuration's result plus the chosen optimum to CSV.
	dfLog = pd.DataFrame(logRecord)
	dfLog.to_csv(datasets[dataSetNum] + '_optimalPaaLgbLog.csv', index = False, header = ['windowSize', 'wordSize', 'trainErrorRate'], encoding = 'utf-8')

if __name__ == "__main__":
	# Run the full experiment on dataset index 0 ("Two_Patterns").
	testOneDataSet(0)
