# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from tkinter import _flatten  # 将二维列表转成一维元组
import math
import itertools
import functools, time
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn import preprocessing

# UCR time-series benchmark datasets.  Each entry names both the directory and
# the file prefix on disk: ./<name>/<name>_TRAIN and ./<name>/<name>_TEST.
# The trailing #k comment is the index used by loadDatasets/testOneDataSet.
datasets = [
	"Two_Patterns",						#0
	"ChlorineConcentration",			#1
	"wafer",							#2
	"MedicalImages",					#3
	"FaceAll",							#4
	"OSULeaf",							#5
	"Adiac",							#6
	"SwedishLeaf",						#7
	"yoga",								#8
	"fish",								#9
	"Lighting7",						#10
	"Lighting2",						#11
	"Trace",							#12
	"synthetic_control",				#13
	"FacesUCR",							#14
	"CinC_ECG_torso",					#15
	"MALLAT",							#16
	"Symbols",							#17
	"Coffee",							#18
	"ECG200",							#19
	"ECG5000",							#20
	"FaceFour",							#21
	"OliveOil",							#22
	"Gun_Point",						#23
	"Beef",								#24
	"DiatomSizeReduction",				#25
	"CBF",								#26
	"ECGFiveDays",						#27
	"TwoLeadECG",						#28
	"SonyAIBORobotSurfaceII",			#29
	"MoteStrain",						#30
	"ItalyPowerDemand",					#31
	"SonyAIBORobotSurface",				#32
	"Haptics",							#33
	"InlineSkate",						#34
	"50words",							#35
	"Cricket_Y",						#36
	"Cricket_X",						#37
	"Cricket_Z",						#38
	"WordsSynonyms",					#39
	"uWaveGestureLibrary_Z",			#40
	"uWaveGestureLibrary_Y",			#41
	"uWaveGestureLibrary_X",			#42
	"NonInvasiveFatalECG_Thorax1",		#43
	"NonInvasiveFatalECG_Thorax2",		#44
	"StarLightCurves",					#45
]

def recordTime(fn):
    """Decorator that prints the wall-clock runtime of every call to *fn*."""
    @functools.wraps(fn)
    def wrapper(*args, **kw):
        begin = time.time()
        result = fn(*args, **kw)
        elapsed = time.time() - begin
        print('%s executed in %s s' % (fn.__name__, elapsed))
        return result
    return wrapper

def loadDatasets(dataSetNum):
	'''
	Desc: Load dataset number ``dataSetNum`` (0-based index into ``datasets``)
	from disk and return (trainInput, trainLabels, testInput, testLabels).
	Column 0 of each file is the class label; the rest is the series.
	'''
	name = datasets[dataSetNum]

	dfData = pd.read_csv('./' + name + '/' + name + '_TRAIN', header = None)
	trainLabels = dfData.iloc[:, 0]
	print("trainLabels's size : ", trainLabels.shape)
	trainInput = dfData.iloc[:, 1:]
	print("trainInput size : ", trainInput.shape)

	dfData = pd.read_csv('./' + name + '/' + name + '_TEST', header = None)
	testLabels = dfData.iloc[:, 0]
	print("testLabels's size : ", testLabels.shape)
	testInput = dfData.iloc[:, 1:]
	print("testInput size : ", testInput.shape)

	return trainInput, trainLabels, testInput, testLabels

def zNorm(dfData):
	'''
	Desc: Z-normalise every row of a 2-D DataFrame-like input so each row has
	zero mean and unit (population, ddof=0) standard deviation; returns an
	np.array.

	Improvements over the per-element Python loop: the whole computation is
	vectorised in numpy, and the data is cast to float up front so integer
	input is not silently truncated when normalised values are written back.
	A constant row (std == 0) still yields inf/nan, as the numpy division in
	the original loop did.
	'''
	mat = np.asarray(dfData, dtype = float)
	# keepdims=True keeps the per-row statistics broadcastable over columns.
	rowMean = mat.mean(axis = 1, keepdims = True)
	rowStd = mat.std(axis = 1, keepdims = True)
	return (mat - rowMean) / rowStd

def combination(alphaNum, repeatNum):
	'''
	Desc: Enumerate every word of length ``repeatNum`` over the first
	``alphaNum`` lowercase letters, in lexicographic order.
	E.g. combination(4, 4) -> ['aaaa', 'aaab', ..., 'dddd'].
	'''
	letters = "abcdefghijklmnopqrstuvwxyz"[:alphaNum]
	return [''.join(chars) for chars in itertools.product(letters, repeat = repeatNum)]

def valueToAlphabet(x):
	'''
	Desc: Map a z-normalised value onto one of four SAX symbols 'a'-'d' using
	the breakpoints -0.67, 0 and 0.67 (four equiprobable regions of N(0, 1)).
	'''
	if x <= -0.67:
		return 'a'
	elif x <= 0:
		return 'b'
	elif x <= 0.67:
		return 'c'
	elif x > 0.67:
		# final explicit test (not a bare else) so a NaN input falls through
		# to an implicit None, exactly as the original chain of ifs did
		return 'd'

def calcSlope(window, frameLength):
	'''
	Desc: Fit a least-squares line through the first ceil(frameLength) samples
	of ``window`` (x = 1..n, y = window values) and quantise its slope into
	one of the letters 'A'-'E'.

	Bug fix: the denominator previously summed the *y* values
	(n*Σy² - (Σy)²) instead of the *x* values; the least-squares slope is
	(n*Σxy - Σx*Σy) / (n*Σx² - (Σx)²).
	'''
	length = math.ceil(frameLength)
	sumXY, sumX, sumY, sumXX = 0, 0, 0, 0
	for i in range(0, length):
		x = i + 1          # abscissa of sample i
		sumXY += x * window[i]
		sumX += x
		sumY += window[i]
		sumXX += x * x
	denominator = length * sumXX - sumX * sumX
	if denominator == 0:
		# degenerate fit (length <= 1): treat as flat
		slope = 0
	else:
		slope = (length * sumXY - sumX * sumY) / denominator
	# Quantise the slope into five symbols with breakpoints -1, 0, 1.
	if slope <= -1:
		return 'A'
	if -1 < slope < 0:
		return 'B'
	if slope == 0:
		return 'C'
	if 0 < slope < 1:
		return 'D'
	if 1 <= slope:
		return 'E'

def slideWindow(inputData, windowSize, wordSize):
	'''
	Desc: Slide a window of ``windowSize`` over every row of ``inputData`` and
	encode each window as a word: one slope letter ('A'-'E') followed by
	``wordSize`` PAA letters ('a'-'d').  Frames may have fractional length, so
	a sample straddling a frame boundary is split proportionally between the
	two frames.  Consecutive duplicate words are then collapsed
	(numerosity reduction).  Returns one reduced word list per row.

	Bug fix: the numerosity-reduction pass used to start at index 1 and never
	emitted wordList[0], silently dropping the first word of every row; it now
	keeps the first word and removes only consecutive repeats.

	NOTE(review): ``currentFrameSize == frameLength`` compares accumulated
	floats for exact equality; when wordSize does not divide windowSize,
	rounding error could skip a frame boundary — confirm the configurations
	used keep this exact, or add a tolerance.
	'''
	paaDataMat = []
	colLength = inputData.shape[1]
	frameLength = windowSize / wordSize
	for aRow in inputData:
		wordList = []
		for i in range(0, colLength - windowSize + 1):
			window = aRow[i:i + windowSize]   # current sliding window
			word = calcSlope(window, frameLength) # word starts with the slope letter
			frameSum = 0
			currentFrameSize = 0
			remaining = 0
			for j in range(0, windowSize):   # PAA inside the window
				remaining = frameLength - currentFrameSize
				if remaining > 1:
					frameSum += window[j]
					currentFrameSize += 1
				else:
					# sample straddles a frame boundary: take only the
					# fraction belonging to the current frame
					frameSum += remaining * window[j]
					currentFrameSize += remaining
				if currentFrameSize == frameLength:
					word += valueToAlphabet(frameSum / frameLength)
					# seed the next frame with the left-over fraction
					frameSum = (1 - remaining) * window[j]
					currentFrameSize = 1 - remaining
			wordList.append(word)
		# numerosity reduction: keep the first word, drop consecutive repeats
		reducedWordList = wordList[:1]
		for k in range(1, len(wordList)):
			if wordList[k] != wordList[k - 1]:
				reducedWordList.append(wordList[k])
		paaDataMat.append(reducedWordList)
	return paaDataMat

from sklearn.feature_extraction.text import TfidfTransformer  
from sklearn.feature_extraction.text import CountVectorizer  
def makeTfIdfMat(paaData):
	'''
	Desc: Build a tf-idf matrix (nested list) from the PAA word lists: one row
	per document, one column per distinct word, a[i][j] being the tf-idf
	weight of word j in document i.

	Improvements: the element-by-element copy of the weight matrix is replaced
	by ``toarray().tolist()``, and the call to the deprecated
	``CountVectorizer.get_feature_names()`` (removed in scikit-learn 1.2) is
	dropped — only the matrix itself was ever used.
	'''
	# Join each word list into one space-separated document string for sklearn.
	paaDataContent = [' '.join(words) for words in paaData]
	# CountVectorizer builds the term-frequency matrix;
	# TfidfTransformer converts it into tf-idf weights.
	vectorizer = CountVectorizer()
	transformer = TfidfTransformer()
	tfIdf = transformer.fit_transform(vectorizer.fit_transform(paaDataContent))
	return tfIdf.toarray().tolist()

def dealResult(predRes):
	'''
	Desc: Turn per-class score rows into predicted class indices by taking the
	position of the largest score in each row (first occurrence on ties).
	'''
	return [row.tolist().index(max(row)) for row in predRes]

def mergeClassification(dataMat, labels):
	'''
	Desc: Group the word lists of ``dataMat`` by class label, append the full
	vocabulary to every class (so each class document contains every possible
	word at least once), and return one flattened word list per class.

	Improvements: the duplicated if/else append branches collapse into a
	single ``setdefault`` call, and the private ``tkinter._flatten`` helper is
	replaced by ``itertools.chain.from_iterable`` (itertools is already a
	file-level import), which flattens a list of lists identically.
	'''
	# Group rows by class label (insertion order preserved, as before).
	mergeDict = {}
	for label, row in zip(labels, dataMat):
		mergeDict.setdefault(label, []).append(row)

	# Vocabulary: every slope letter 'A'-'E' prefixed to every combination of
	# the 4 PAA letters over the remaining word length.
	wordLength = len(dataMat[0][0])
	suffixes = combination(4, wordLength - 1)
	wordList = [prefix + suffix for prefix in ['A', 'B', 'C', 'D', 'E'] for suffix in suffixes]

	classPaaMat = []
	for classify in mergeDict:
		mergeDict[classify].append(wordList)
		# Flatten the class's list of word lists into one word list so it can
		# feed the tf-idf builder.
		classPaaMat.append(list(itertools.chain.from_iterable(mergeDict[classify])))
	return classPaaMat

def cosSim(vector1,vector2):
	'''
	Desc: Cosine similarity of two vectors, computed over their zipped
	(common-length) prefix; returns None when either norm is zero.
	'''
	pairs = list(zip(vector1, vector2))
	dot = sum(a * b for a, b in pairs)
	sqA = sum(a * a for a, _ in pairs)
	sqB = sum(b * b for _, b in pairs)
	if sqA == 0.0 or sqB == 0.0:
		return None
	return dot / math.sqrt(sqA * sqB)

def compareAndClassify(caseTfI, classTfIdf):
	'''
	Desc: Classify every case tf vector by cosine similarity against each
	class tf-idf vector; returns the list of best-matching class indices
	(0 when nothing scores above zero).

	Bug fix: cosSim returns None for zero-norm vectors, and ``None > tmpRec``
	raises TypeError on Python 3; None similarities are now skipped instead of
	crashing the whole classification.
	'''
	classRes = []
	for case in caseTfI:
		caseClassRes = 0
		tmpRec = 0
		for classify, tfIdfList in enumerate(classTfIdf):
			sim = cosSim(case, tfIdfList)
			if sim is not None and sim > tmpRec:
				tmpRec = sim
				caseClassRes = classify
		classRes.append(caseClassRes)
	return classRes

def makeCaseTf(trainInput):
	'''
	Desc: Build each case's term-frequency vector over the full word
	vocabulary; returns the tf matrix as a nested list.

	Improvements: occurrences are tallied once per case with
	collections.Counter — O(len(case)) instead of one ``list.count`` pass per
	vocabulary word, O(len(case) * len(vocabulary)) — and a case containing no
	vocabulary word now yields an all-zero row instead of ZeroDivisionError.
	'''
	from collections import Counter  # local import: collections is not in the file header

	wordLength = len(trainInput[0][0])
	suffixes = combination(4, wordLength - 1)
	# Vocabulary: slope letter 'A'-'E' + every 4-letter-alphabet combination.
	wordList = [prefix + suffix for prefix in ['A', 'B', 'C', 'D', 'E'] for suffix in suffixes]

	caseTfMat = []
	for case in trainInput:
		counts = Counter(case)
		wordCnt = [counts[word] for word in wordList]
		cntSum = sum(wordCnt)
		if cntSum == 0:
			# no recognised words: emit zeros rather than dividing by zero
			caseTfMat.append([0.0] * len(wordList))
		else:
			caseTfMat.append([cnt / cntSum for cnt in wordCnt])
	return caseTfMat

@recordTime
def trainAndPredict(windowSize, wordSize, trainInput, trainLabels):
	'''
	Desc: Build the tf-idf classification model (one tf-idf vector per class)
	for the given sliding-window configuration and measure its error rate on
	the training set.  Returns that error rate together with the class tf-idf
	matrix, so the caller can keep the best model over all configurations and
	later apply it to the test set.
	'''
	trainInput = slideWindow(trainInput, windowSize, wordSize)
	trainCaseTf = makeCaseTf(trainInput)
	trainClassPaaMat = mergeClassification(trainInput, trainLabels)
	# Dump the per-class word lists to csv with pandas for inspection.
	dfRec = pd.DataFrame(trainClassPaaMat)
	dfRec.to_csv('recordClassPaaMat.csv', index = False, header = False)
	classTfIdf = makeTfIdfMat(trainClassPaaMat) # the test phase must reuse this same matrix
	trainClassifyRes = compareAndClassify(trainCaseTf, classTfIdf)
	# Compute the training error rate: share of mismatched labels.
	trainErrorCnt = 0
	for i in range(0, len(trainClassifyRes)):
		if trainLabels[i] != trainClassifyRes[i]:
			trainErrorCnt += 1
	trainErrorRate = trainErrorCnt / len(trainClassifyRes)
	print('trainErrorRate : ', trainErrorRate)

	return trainErrorRate, classTfIdf


def testPredict(classTfIdf, windowSize, wordSize, testInput, testLabels):
	'''
	Desc: Apply an already-trained model (classTfIdf) to the test set and
	return its error rate.
	'''
	testWords = slideWindow(testInput, windowSize, wordSize)
	testCaseTf = makeCaseTf(testWords)
	testClassifyRes = compareAndClassify(testCaseTf, classTfIdf)
	# Error rate = share of predictions that disagree with the labels.
	testErrorCnt = sum(1 for i in range(len(testClassifyRes)) if testLabels[i] != testClassifyRes[i])
	testErrorRate = testErrorCnt / len(testClassifyRes)
	print('testErrorRate : ', testErrorRate)
	return testErrorRate

def testOneDataSet(dataSetNum):
	'''
	Desc: Run the whole pipeline on dataset ``dataSetNum``: z-normalise the
	data, try each sliding-window configuration on the training set, keep the
	best model, evaluate it once on the test set, and write all results to a
	per-dataset csv log.
	'''
	trainInput, trainLabels, testInput, testLabels = loadDatasets(dataSetNum)	
	trainInput = zNorm(trainInput)
	# Dump the z-normalised training matrix to csv for inspection.
	dfRec = pd.DataFrame(trainInput)
	dfRec.to_csv('trainZnormMat.csv', index = False, header = False)
	testInput = zNorm(testInput)
	# Re-encode the labels into [0, n); otherwise comparing predictions
	# (class indices) against raw label values would miscount errors.
	lE = preprocessing.LabelEncoder()
	trainLabels = lE.fit(trainLabels).transform(trainLabels)
	testLabels = lE.transform(testLabels)

	colLengh = len(trainInput[0])
	end = int(math.log(colLengh / 2 , 2))
	# Candidate sliding-window configurations.  Currently fixed to one pair;
	# the commented line sweeps powers of two up to half the series length.
	# windowSizeList = list(map(int, np.logspace(4, end, end - 3,base=2)))
	windowSizeList = [48]
	wordSizeList = [3]
	logRecord = []
	optimalClassTfIdf = None
	tmpErrorRate, optimalWindowSize, optimalWordSize = 1, None, None
	for windowSize in windowSizeList:
		for wordSize in wordSizeList:
			trainErrorRate, classTfIdf = trainAndPredict(windowSize,wordSize,trainInput, trainLabels)
			logRecord.append([windowSize, wordSize, trainErrorRate])
			if trainErrorRate < tmpErrorRate:
				tmpErrorRate, optimalWindowSize, optimalWordSize = trainErrorRate, windowSize, wordSize
				optimalClassTfIdf = classTfIdf

	# NOTE(review): if every configuration scores a training error rate of
	# exactly 1, optimalClassTfIdf stays None and testPredict will fail.
	logRecord.append(['The optimal parameters are selected as follows:'])
	logRecord.append(['windowSize', 'wordSize', 'testErrorRate'])
	testErrorRate = testPredict(optimalClassTfIdf, optimalWindowSize, optimalWordSize, testInput, testLabels)
	logRecord.append([optimalWindowSize, optimalWordSize, testErrorRate])	

	# Persist every configuration's result to a per-dataset log file.
	dfLog = pd.DataFrame(logRecord)
	dfLog.to_csv(datasets[dataSetNum] + '_optimalTfSlopPaaCosinSimilarityLog.csv', index = False, header = ['windowSize', 'wordSize', 'trainErrorRate'])

if __name__ == "__main__":
	# When run as a script, process dataset 0 ("Two_Patterns").
	testOneDataSet(0)
