#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Happiness on 2017/11/5
# Goal: learn how to define custom loss functions
# Toy product-demand prediction example

import tensorflow as tf
from numpy.random import RandomState

# --- Graph construction ---------------------------------------------------
# Toy regression: a single linear layer (2 inputs -> 1 output) trained with
# an L2-regularized MSE loss and an exponentially decayed learning rate.

# Placeholders for input features and ground-truth labels.
batch_size = 8
x = tf.placeholder(tf.float32, shape=[None, 2], name='x-input')
y_ = tf.placeholder(tf.float32, shape=[None, 1], name='y-input')

# Weights of the single linear layer (seeded for reproducibility).
w1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))

# Forward pass: plain linear model, no bias and no activation.
y = tf.matmul(x, w1)

# Asymmetric-cost constants for the custom loss kept below for reference:
# under-predicting is 10x more expensive than over-predicting.
loss_less = 10
loss_more = 1

# Exponentially decayed learning rate.
# global_step must be non-trainable: it is a step counter, not a model
# parameter, and should not appear in the optimizer's trainable-variable list.
global_step = tf.Variable(0, trainable=False)
decay_learning_rate = tf.train.exponential_decay(
    0.1, global_step=global_step, decay_steps=80, decay_rate=0.9,
    staircase=False)

# Custom asymmetric loss, kept for reference:
# loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))

# MSE loss; the documented signature is mean_squared_error(labels, predictions).
mse_loss = tf.losses.mean_squared_error(y_, y)
# Add an L2 regularization term on the weights to discourage overfitting.
tf.add_to_collection("losses", tf.contrib.layers.l2_regularizer(0.003)(w1))
tf.add_to_collection("losses", mse_loss)
loss = tf.add_n(tf.get_collection("losses"))

# One optimization step; passing global_step makes minimize() increment it,
# which in turn drives the learning-rate decay above.
train_step = tf.train.AdamOptimizer(
    learning_rate=decay_learning_rate).minimize(loss, global_step)

# Synthetic training data: Y = x1 + x2 + uniform noise in [-0.05, 0.05).
rdm = RandomState(1)
X = rdm.rand(128, 2)
Y = [[x1 + x2 + (rdm.rand()) / 10.0 - 0.05] for (x1, x2) in X]

# --- Training loop --------------------------------------------------------
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEP = 5000
    for i in range(STEP):
        # Cycle through the 128 samples in mini-batches of batch_size.
        start = (i * batch_size) % 128
        end = start + batch_size
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            print("After %d training step(s), w1 is: " % (i))
            print(sess.run(w1))
            print("learning_rate : %f" % sess.run(decay_learning_rate))
    print("Final w1 is: \n", sess.run(w1))

if __name__ == '__main__':
    # NOTE(review): this guard is a no-op -- all of the graph construction and
    # training above executes at module import time, so nothing is gated here.
    pass
