# -*- coding:utf-8 -*-
import os,sys
import re
import traceback
import time
import keras
from StockSampling import StockSampling
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, merge, Activation, Flatten, LSTM, Embedding, Input
from keras.optimizers import SGD, Adam
from keras.layers import Merge
from keras.utils.np_utils import to_categorical
from keras.layers import Convolution2D, MaxPooling2D

import numpy as np
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir))
import supeanut_config
from CommonLib.mylog import mylog


'''
Author: supeanut
Created: 2016-xx-xx xx:xx:xx
Description:
	xxx
	xxxxx
Related configuration:
	supeanut_config.XXX
Change history:
	2016-xx-xx: xxxxxx
'''
class KerasSample:
	def __init__(self):
		pass

	def func(self):
		# optimizer:adagrad,rmsprop,adadelta,SGD
		# loss: mse,categorical_crossentropy,binary_crossentropy
		# Activation: tanh,relu,sigmoid,oftmax
		model1 = Sequential()
		model2 = Sequential()
		# 2D layer: Dense input_dim
		# 3D layer: input_dim,input_length
		# or just define input_shape(tuple)
		# add multi_layer
		model1.add(Dense(output_dim=64, input_dim=100, init='uniform'))
		model2.add(Dense(output_dim=64, input_dim=100))
		# mode can be sum,concat(with param concat_axis),mul,ave,dot(with param dot_axes),cos,
		# or define: mode=lambda x: x[0] - x[1])
		merged = Merge([model1, model2], mode='concat')
		model = Sequential()
		model.add(merged)
		model.add(Activation("relu"))
		# add normal layer
		model.add(Dense(output_dim=10))
		model.add(Activation("softmax"))
		model.add(Dropout(0.5))
		# consider one layer, can define every
		# --name can be used as 
		#model.compile(optimizer='rmsprop',
        #      loss={'main_output': 'binary_crossentropy', 'aux_output': 'binary_crossentropy'},
        #      loss_weights={'main_output': 1., 'aux_output': 0.2})
		#model.fit({'main_input': headline_data, 'aux_input': additional_data},
        #  {'main_output': labels, 'aux_output': labels},
        #  nb_epoch=50, batch_size=32)
		inputs = Input(shape=(784,), name='input_1')
		x = Dense(64, activation='relu')(inputs)
		x = Dense(64, activation='relu')(x)
		predictions = Dense(10, activation='softmax')(x)
		model = Model(input=inputs, output=predictions)
		inp = Input(shape=(784,))
		y = model(x)
		# --multi input and ouput
		model = Model(input=[main_input, auxiliary_input], output=[main_output, auxiliary_output])
		# --define different ouput loss weight
		model.compile(optimizer='rmsprop', loss='binary_crossentropy',
              loss_weights=[1., 0.2])
		# train multi_sequential
		model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
		model.fit([input_data_1, input_data_2], targets)
		# train single_sequential
		model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
		model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True))
		# train data: X_train(dimensions_list),Y_train(labels_list)
		# --X np format: np.random.random((1000, 784)),1000个数据，每个数据784维。
		# --Y np format: np.random.randint(10, size=(1000, 1)) 1000个数据的label，共10类
		# ----需要转成labels = to_categorical(labels, 10)，即matrix of size (1000, 10)，1000个数据，01表示的类
		model.fit(X_train, Y_train, nb_epoch=5, batch_size=32)
		model.train_on_batch(X_batch, Y_batch)
		loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)
		classes = model.predict_classes(X_test, batch_size=32)
		proba = model.predict_proba(X_test, batch_size=32)
		model.save(filepath)
		keras.models.load_model(filepath)
		# save as JSON
		json_string = model.to_json()
		# model reconstruction from JSON:
		from keras.models import model_from_json
		model = model_from_json(json_string)
		model.save_weights('my_model_weights.h5')
		model.load_weights('my_model_weights.h5')
		# differnt struct but common node with same name
		model.load_weights('my_model_weights.h5', by_name=True)
		
	
	def VGG_like_convnet(self, ):
		model = Sequential()
		# input: 100x100 images with 3 channels -> (3, 100, 100) tensors.
		# this applies 32 convolution filters of size 3x3 each.
		model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(3, 100, 100)))
		model.add(Activation('relu'))
		model.add(Convolution2D(32, 3, 3))
		model.add(Activation('relu'))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))

		model.add(Convolution2D(64, 3, 3, border_mode='valid'))
		model.add(Activation('relu'))
		model.add(Convolution2D(64, 3, 3))
		model.add(Activation('relu'))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))

		model.add(Flatten())
		# Note: Keras does automatic shape inference.
		model.add(Dense(256))
		model.add(Activation('relu'))
		model.add(Dropout(0.5))

		model.add(Dense(10))
		model.add(Activation('softmax'))

		sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
		model.compile(loss='categorical_crossentropy', optimizer=sgd)

		model.fit(X_train, Y_train, batch_size=32, nb_epoch=1)

	def Multilayer_Perceptron_MLP_for_multi_class_softmax_classification(self,):
		# Dense(64) is a fully-connected layer with 64 hidden` units.
		# in the first layer, you must specify the expected input data shape:
		# here, 20-dimensional vectors.
		model.add(Dense(64, input_dim=20, init='uniform'))
		model.add(Activation('tanh'))
		model.add(Dropout(0.5))
		model.add(Dense(64, init='uniform'))
		model.add(Activation('tanh'))
		model.add(Dropout(0.5))
		model.add(Dense(10, init='uniform'))
		model.add(Activation('softmax'))

		sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
		model.compile(loss='categorical_crossentropy',
					  optimizer=sgd,
					  metrics=['accuracy'])

		model.fit(X_train, y_train,
				  nb_epoch=20,
				  batch_size=16)
		score = model.evaluate(X_test, y_test, batch_size=16)

	def Alternative_implementation_of_a_similar_MLP(self):
		model = Sequential()
		model.add(Dense(64, input_dim=20, activation='relu'))
		model.add(Dropout(0.5))
		model.add(Dense(64, activation='relu'))
		model.add(Dropout(0.5))
		model.add(Dense(10, activation='softmax'))

		model.compile(loss='categorical_crossentropy',
					  optimizer='adadelta',
					  metrics=['accuracy'])

	def MLP_for_binary_classification(self, ):
		model = Sequential()
		model.add(Dense(64, input_dim=20, init='uniform', activation='relu'))
		model.add(Dropout(0.5))
		model.add(Dense(64, activation='relu'))
		model.add(Dropout(0.5))
		model.add(Dense(1, activation='sigmoid'))

		model.compile(loss='binary_crossentropy',
					  optimizer='rmsprop',
					  metrics=['accuracy'])
	
	def Sequence_classification_with_LSTM(self, ):
		model = Sequential()
		model.add(Embedding(max_features, 256, input_length=maxlen))
		model.add(LSTM(output_dim=128, activation='sigmoid', inner_activation='hard_sigmoid'))
		model.add(Dropout(0.5))
		model.add(Dense(1))
		model.add(Activation('sigmoid'))

		model.compile(loss='binary_crossentropy',
					  optimizer='rmsprop',
					  metrics=['accuracy'])

		model.fit(X_train, Y_train, batch_size=16, nb_epoch=10)
		score = model.evaluate(X_test, Y_test, batch_size=16)	

	def mymodel(self):
		# 获取数据
		obj = StockSampling()
		ma_list = []
		for ma_num in [5, 10, 20, 55, 89, 89, 144, 233, 377, 450, 610]:
			ma_list += obj.create_continue_index_name('ma',[ma_num],10)
		vol_list = []
		for vol_num in [5, 10, 20, 55, 89, 89, 144, 233, 377, 450, 610]:
			vol_list += obj.create_continue_index_name('volma_stock',[vol_num],10)
		#dimension_list = ma_list + ['open(0)','close(0)','high(0)','low(0)'] + vol_list
		dimension_list = ma_list + ['open(0)','close(0)','high(0)','low(0)']
		target_list = ['(close(2)-close(0))/close(0)>0']
		obj.caldims(period_s = '2009-12-25 00:00:00', period_e = '2017-01-04 00:00:00', adj = 'ori', date_grain = 'day',\
					code_str_list = ['sz300033',],\
					dimension_list = dimension_list,\
					target_list = target_list,\
					)
		result_dict = obj.sampling(train_periods = ['1992-12-25 00:00:00', '2015-01-01 00:00:00'],\
					valid_periods=['2015-01-02 00:00:00', '2015-01-02 00:00:00'],\
					test_periods=['2015-01-02 00:00:00', '2017-01-04 00:00:00'])
		# 搭建网络
		model = Sequential()
		model.add(Dense(64, input_dim=114, init='uniform', activation='relu'))
		model.add(LSTM(output_dim=64, activation='relu'))
		model.add(LSTM(output_dim=32, activation='relu'))
		model.add(LSTM(output_dim=1, activation='relu'))
		model.compile(loss='binary_crossentropy',
					  optimizer=Adam(),
					  metrics=['accuracy'])
		'''
		l1_input = Input(shape=(224,), name='l1_input')
		l2_1 = Dense(100, activation='relu', name='l2_1')(l1_input)
		l2_2 = Dense(100, activation='relu', name='l2_2')(l1_input)
		l3_1 = Dense(10, activation='relu', name='l3_1')(l2_1)
		l3_2 = Dense(10, activation='relu', name='l3_2')(l2_2)
		l4 = merge([l3_1, l3_2], mode='concat', name='l4')
		l5 = Dense(1, name='l5')(l4)
		model = Model(input=l1_input,output=l5)
		#model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),metrics=['accuracy'])
		model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),metrics=['accuracy'])
		'''
		#dims = np.random.random((10000, 444))
		dims = np.array(result_dict['train_set']['features'])
		#labels = np.random.randint(3, size=(10000, 1))
		#labels = to_categorical(labels, 3)
		labels = np.array(result_dict['train_set']['labels'])
		model.fit(dims, labels, nb_epoch=100, batch_size=100)
		#test_dims = np.random.random((10, 1000))
		#test_labels = np.random.randint(3, size=(10, 1))
		#test_labels = to_categorical(test_labels, 3)
		test_dims = np.array(result_dict['test_set']['features'])
		test_labels = np.array(result_dict['test_set']['labels'])
		score = model.evaluate(test_dims, test_labels)
		print score
		#classes = model.predict(test_dims)
		#print classes
		#from keras.utils.visualize_util import plot
		#plot(model, to_file='model.png')
		
if __name__ == '__main__':
	# Script entry point: train and evaluate the stock model.
	KerasSample().mymodel()
