# -*- coding: utf-8 -*-
"""
Created on Wed Nov  8 15:29:13 2017

@author: xuanlei
"""

import nn_3l
import pandas as pd
import tensorflow as tf
from sklearn.cross_validation import train_test_split
import numpy as np
import sklearn.preprocessing as prep
from sklearn.metrics import classification_report
from sklearn.preprocessing import normalize, MaxAbsScaler
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
#==============================================================================
# Global Option
#==============================================================================
# Load the raw competition data; CSVs are expected next to this script.
# Column names are Chinese sensor labels (板温 = panel temperature,
# 光照强度 = irradiance, 电压/电流/功率 = voltage/current/power, ...).
train_set = pd.read_csv('public.train.csv')
test_set = pd.read_csv('public.test.csv')


def piple_feature(data):
    """Add engineered feature columns to *data* in place and return it.

    Derives temperature deltas, irradiance/temperature-to-efficiency ratios,
    per-channel impedances (U/I), and each channel's share of the total
    voltage, current and power. The passed DataFrame is mutated (columns are
    added in place) and also returned for call-chaining.

    NOTE(review): several ratios divide by raw sensor columns (光照强度,
    现场温度, 转换效率*, 电流*) that may contain zeros, producing inf/NaN —
    that was the original behavior and is preserved here; confirm upstream
    filtering (see the commented-out '转换效率 != 0' filters).
    """
    # Hoist the per-channel sums: the original recomputed each of these
    # expressions up to five times. Kept as local Series (not columns) so the
    # column insertion order stays identical to the original.
    u_sum = data['电压A'] + data['电压B'] + data['电压C']
    a_sum = data['电流A'] + data['电流B'] + data['电流C']
    p_sum = data['功率A'] + data['功率B'] + data['功率C']

    data['t_diff'] = data['板温'] - data['现场温度']
    data['tdiff_g'] = data['t_diff'] / data['光照强度']
    data['tdiff_x'] = data['t_diff'] / data['现场温度']
    data['trans_diff'] = data['板温'] / data['光照强度']
    data['ps_avg'] = (data['转换效率A'] + data['转换效率B'] + data['转换效率C']) / 3
    data['g_pa'] = data['光照强度'] / data['转换效率A']
    data['g_pb'] = data['光照强度'] / data['转换效率B']
    data['g_pc'] = data['光照强度'] / data['转换效率C']
    data['t_pa'] = data['板温'] / data['转换效率A']
    data['t_pb'] = data['板温'] / data['转换效率B']
    data['t_pc'] = data['板温'] / data['转换效率C']
    data['u_avg'] = u_sum / 3
    # Per-channel impedance estimates (V / I).
    data['za'] = data['电压A'] / data['电流A']
    data['zb'] = data['电压B'] / data['电流B']
    data['zc'] = data['电压C'] / data['电流C']
    data['z'] = data['za'] + data['zb'] + data['zc']
    data['all_u'] = u_sum
    data['all_a'] = a_sum
    # Each channel's fraction of the total voltage / current / power.
    data['u_pa'] = data['电压A'] / u_sum
    data['u_pb'] = data['电压B'] / u_sum
    data['u_pc'] = data['电压C'] / u_sum
    data['a_avg'] = a_sum / 3
    data['a_pa'] = data['电流A'] / a_sum
    data['a_pb'] = data['电流B'] / a_sum
    data['a_pc'] = data['电流C'] / a_sum
    data['p_avga'] = data['功率A'] / p_sum
    data['p_avgb'] = data['功率B'] / p_sum
    data['p_avgc'] = data['功率C'] / p_sum
    return data

# Input columns fed to the network, in order: the 19 raw sensor columns
# followed by the 28 engineered columns created by piple_feature
# (47 inputs total; input_size below is derived from this list's length).
feature_list = ['板温', '现场温度', '光照强度', '转换效率', '转换效率A', '转换效率B', '转换效率C', '电压A',
       '电压B', '电压C', '电流A', '电流B', '电流C', '功率A', '功率B', '功率C', '平均功率', '风速',
       '风向', 't_diff', 'tdiff_g', 'tdiff_x', 'trans_diff', 'ps_avg',
       'g_pa', 'g_pb', 'g_pc', 't_pa', 't_pb', 't_pc', 'u_avg', 'za', 'zb',
       'zc', 'z', 'all_u', 'all_a', 'u_pa', 'u_pb', 'u_pc', 'a_avg', 'a_pa',
       'a_pb', 'a_pc', 'p_avga', 'p_avgb', 'p_avgc']


def standard_scale(X_train, X_test):
    """Z-score both tables with statistics fitted on X_train only.

    Fix: ``StandardScaler.transform`` returns plain ndarrays, but downstream
    code (``dataframe_to_tensor``) selects columns with ``.loc`` — so the
    transformed arrays are wrapped back into DataFrames that keep the
    original column names and row index.

    NOTE(review): the caller passes the full table, so the label column
    '发电量' is standardised along with the features — confirm that is
    intentional before training.
    """
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = pd.DataFrame(preprocessor.transform(X_train),
                           columns=X_train.columns, index=X_train.index)
    X_test = pd.DataFrame(preprocessor.transform(X_test),
                          columns=X_test.columns, index=X_test.index)
    return X_train, X_test

# Regression target column (generated power).
label_list = '发电量'

# Derive the engineered columns on both splits, then standardise both using
# statistics fitted on the training split only.
train_set = piple_feature(train_set)
test_set = piple_feature(test_set)
train_data, test_data = standard_scale(train_set, test_set)

#==============================================================================
# Hyper-Para Setting
#==============================================================================

input_size = len(feature_list)  # number of input features (47)
LR = 0.01                       # learning rate
h1_size = 19                    # hidden layer 1 width
h2_size = 25                    # hidden layer 2 width
h3_size = 18                    # hidden layer 3 width
output_size = 1                 # single regression output (发电量)
batch_size = 50                 # rows per mini-batch (training loop slices in 50s)

def dataframe_to_tensor(batch_data, feature_list, nm=True):
    """Select *feature_list* column(s) from *batch_data* as a float32 ndarray.

    Parameters
    ----------
    batch_data : pandas.DataFrame
        Batch of rows to convert.
    feature_list : list of str or str
        Column label(s) passed to ``.loc``; a single label yields a 1-D array.
    nm : bool, default True
        When True, upcast the selection to float64 first (original behavior);
        the result is cast to float32 either way.

    Returns
    -------
    numpy.ndarray of dtype float32
    """
    selected = batch_data.loc[:, feature_list]
    if nm:  # fixed anti-idiom `nm == True`; branches otherwise de-duplicated
        selected = selected.astype(float)
    return np.array(selected, dtype='float32')
    



def train():
    """Train the 3-layer network from ``nn_3l`` on the engineered features.

    Runs up to 100 epochs. Each epoch draws a fresh 7650/1350
    train/validation split, trains in mini-batches of 50 (152 steps), writes
    TensorBoard summaries every 2 steps, prints validation RMSE every 10
    steps, and checkpoints the session after every epoch.
    """
    tf.reset_default_graph()
    # NOTE(review): the class is named LSTMRNN but the module name nn_3l and
    # the (h1, h2, h3) widths suggest a plain 3-layer net — confirm in nn_3l.
    model = nn_3l.LSTMRNN(input_size, output_size, h1_size, h2_size, h3_size, LR, batch_size)
    print('----------> 模型架构完毕，开始训练运行模型！')
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter("./logs_en_x/", sess.graph)

        with tf.variable_scope('scope', reuse=True):
            max_epoch = 100
            epoch = 0
            while epoch < max_epoch:
                # Fresh random split every epoch: 7650 train / 1350 validation rows.
                train_sample, test_sample = train_test_split(train_data, train_size=7650, test_size=1350)
                print('---------> 训练集与测试集划分完毕，开始训练模型！')
                batch_start = 0
                for i in range(1, 153):  # 152 batches of 50 rows
                    train_feature_list = train_sample[batch_start:batch_start + 50]
                    xs_input = dataframe_to_tensor(train_feature_list, feature_list)
                    ys_input = dataframe_to_tensor(train_feature_list, label_list, nm=False)
                    feed_dict_train = {model.xs: xs_input, model.ys: ys_input.reshape([-1, 1]),
                                       model.keep_prob: np.array(0.7, dtype='float32'), model.train_phase: True}
                    batch_start += 50
                    _, cost = sess.run([model.train_op, model.cost], feed_dict=feed_dict_train)
                    if i % 2 == 0:
                        # Record TensorBoard summaries every other step.
                        rs = sess.run(merged, feed_dict=feed_dict_train)
                        writer.add_summary(rs, i)

                    if i % 10 == 0:
                        # Validation pass over all 27 held-out batches.
                        batch_start_test = 0
                        for t in range(27):
                            test_feature_list = test_sample[batch_start_test:batch_start_test + 50]
                            xs_test_input = dataframe_to_tensor(test_feature_list, feature_list)
                            vail_predict = sess.run(model.pred, feed_dict={model.xs: xs_test_input,
                                                                           model.keep_prob: np.array(1, dtype='float32'),
                                                                           model.train_phase: False})
                            ys_test = dataframe_to_tensor(test_feature_list, label_list, nm=False).reshape([-1, 1])
                            # Fixed: original line read `ptint(...)` (typo),
                            # referenced the never-imported `metrics` module,
                            # and was tab-indented (a TabError in this
                            # space-indented function). RMSE via numpy instead.
                            print('RMSE:', np.sqrt(np.mean((ys_test - vail_predict) ** 2)))
                            batch_start_test += 50
                        print("------->中间结果: epoch: {0}".format(epoch + 1))
                        print('-------> now_cost: {0}'.format(cost, 2))

                saver.save(sess, 'lstm_para_cx/para_log')
                epoch += 1

        



        
    
    


# Script entry point: kick off training when run directly.
if __name__ == '__main__':
    train()