#!/usr/bin/python
#coding:utf-8
'''
Linear regression with TensorFlow 1.x: a single linear model (y = xW + b)
trained by per-sample gradient descent on mean squared error.
(Despite the original title, this script performs regression, not classification.)
'''

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import data.dataFormat as data
from sklearn import model_selection
from sklearn.utils import shuffle    


from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from math import sqrt


def formatData(train_x):
    '''
    Rescale each feature column of train_x to the [0, 1] range.

    args:
        train_x: 2-D array-like of raw feature values (rows = samples)

    return:
        numpy array with every column min-max scaled to [0, 1]
    '''
    from sklearn import preprocessing

    # Alternative transform, kept for experimentation:
    # standardize to zero mean / unit variance:
    # train_x = preprocessing.scale(train_x) 

    # Min-max scaling to [0, 1]; the scaler is fitted on train_x itself,
    # so the min/max come from this data only.
    min_max_scaler = preprocessing.MinMaxScaler()
    train_x = min_max_scaler.fit_transform(train_x)
    # Alternative transform: L2-normalize each sample row:
    # train_x = preprocessing.normalize(train_x, norm='l2')

    return train_x

def check(y_true, y_pred):
    '''
    Evaluate regression predictions against the ground truth.

    args:
        y_true: true target values of the test set (array-like of numbers)
        y_pred: predicted target values (array-like, same length)

    Notes:
        mape: mean absolute percentage error, lower is better
              (undefined when y_true contains zeros)
        r2:   coefficient of determination, closer to 1 is better
        rmse: root mean squared error, lower is better

    return:
        summary string of the form "mape:<v> r2:<v> rmse:<v>"
    '''
    index = 3  # number of decimal places in the report

    # Coerce to float arrays so the arithmetic below is element-wise even
    # when plain Python lists are passed in (the original crashed on lists).
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)

    n = len(y_true)
    err = y_true - y_pred

    mape = round(float(np.sum(np.abs(err / y_true))) / n * 100, index)
    # RMSE is the square root of the *unrounded* MSE.
    rmse = round(sqrt(float(np.mean(err ** 2))), index)
    # R^2 = 1 - SS_res / SS_tot
    r2 = round(1 - float(np.sum(err ** 2)) / float(np.sum((y_true - np.mean(y_true)) ** 2)), index)

    return "mape:" + str(mape) + " r2:" + str(r2) + " rmse:" + str(rmse)

# Load the project dataset: training features, training targets, and an
# unlabeled feature set used only for the final prediction.
train_x,train_y,test_x=data.read()
print("训练数据行数",len(train_x),"验证数据行数",len(test_x))
print("*"*10)
# Scale features to [0, 1] (see formatData).
train_x=formatData(train_x)

x_data,X_test,y_data,y_test= model_selection.train_test_split(train_x,train_y,test_size=0.1) # hold out 10% for validation; NOTE(review): random_state is not fixed, so the split differs on every run

x_data=np.array(x_data)
y_data=np.array(y_data)
test_X=np.array(X_test)
test_Y=np.array(y_test)
 

inputSize=train_x.shape[1]  # number of input features per sample
print("特征数",inputSize)

#1. Build the computation graph: placeholders for features and labels.
#   shape=[None, ...] leaves the batch dimension open, so anything from
#   single-sample SGD to full-batch updates can be fed at run time.
x = tf.placeholder(tf.float32,[None,inputSize],name = "X")      # input features
y = tf.placeholder(tf.float32,[None,1],name = "Y")           # target value


with tf.name_scope("Model"):
    
    #2. Model parameters: weight vector (small random init) and bias (zero init).
    w = tf.Variable(initial_value= tf.random_normal([inputSize,1],stddev=0.01),name="W")
    b = tf.Variable(initial_value=tf.zeros(shape=[1]),name="b")

    #3. Prediction: linear model y = xW + b.
    pred = tf.matmul(x,w) + b  # matmul = matrix product (multiply would be element-wise)
    


#4. Loss function: mean squared error.
with tf.name_scope("LossFunction"):
    # loss_function = tf.reduce_mean(tf.pow(y - pred,2))    # equivalent MSE form
    loss_function = tf.reduce_mean(tf.square(y - pred)) 

#5. Optimizer: plain gradient descent minimizing the MSE loss.
learning_rate = 0.001  # learning rate (step size)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)

#6. Execution phase: train the model, then predict on the held-out features.
with  tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Training loop: one parameter update per sample (stochastic GD).
    train_epochs = 1000
    for epoch in range(train_epochs):
        loss_sum = 0.0
        for xs,ys in zip(x_data,y_data):

            # Reshape to the 2-D shapes the placeholders expect.
            xs = xs.reshape(1,inputSize) 
            ys = ys.reshape(1,1)
            # fed data must match the placeholders' shapes
            _,loss = sess.run([optimizer,loss_function],feed_dict={x:xs,y:ys})
            
            loss_sum = loss_sum + loss

        # Reshuffle every epoch so the model does not fit the sample order.
        x_data,y_data = shuffle(x_data,y_data)
        
        b0temp = b.eval(session=sess)            # current bias value
        w0temp = w.eval(session=sess)            # current weight values
        loss_average = loss_sum/len(y_data)      # mean per-sample loss this epoch
        
        print("epoch=",epoch+1,"loss=",loss_average)#,"b=",b0temp,"w=",w0temp)

    # Apply the trained model to the validation features.
    predict = sess.run(pred,feed_dict={x:test_X})
    predict=predict.flatten().tolist()  # flatten the 2-D output into a plain list
    print(predict)
    print(check(test_Y,predict))