#!/usr/bin/env python
# -*- coding:utf-8 -*-

import tensorflow as tf
import numpy as np

BATCH_SIZE = 8
seed = 23455

# Seed the RNG so the generated dataset is reproducible across runs.
rng = np.random.RandomState(seed)
# 32 samples, 2 features each, uniform in [0, 1).
X = rng.rand(32, 2)
# Labels: y = x0 + x1 plus uniform noise in [-0.05, 0.05).
# BUG FIX: the original wrote `x0 + x1 + x1` (x1 counted twice), which
# contradicts the recorded run transcript below — there w1 converges to
# roughly [1, 1] (not [1, 2]) and Y[0] = 0.9698 ≈ X[0][0] + X[0][1].
Y = [[x0 + x1 + rng.rand() / 10.0 - 0.05] for (x0, x1) in X]
print('X:\n', X)
print('Y:\n', Y)

# Define the graph inputs and the trainable parameter.
# x:  placeholder for a batch of 2-feature samples, shape (batch, 2).
# y_: placeholder for the ground-truth labels, shape (batch, 1).
x = tf.placeholder(tf.float32,shape=(None,2))
y_ = tf.placeholder(tf.float32,shape=(None,1))
# Single weight matrix (2 -> 1), drawn from N(0, 1) with a fixed seed
# so every run starts from the same initial weights.
w1 = tf.Variable(tf.random_normal([2, 1], stddev= 1, seed= 1))
# Forward pass: plain linear model y = x @ w1 (no bias term).
y = tf.matmul(x, w1)

# Loss: mean squared error over the batch; optimizer: vanilla SGD
# with learning rate 0.001.
loss = tf.reduce_mean(tf.square(y - y_))
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

# Open a session and run the training loop.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Show the (still random) weights before any training happens.
    print('w1:\n', sess.run(w1))
    print('\n')

    num_steps = 20000
    for step in range(num_steps):
        # Cycle through the 32-sample dataset in BATCH_SIZE-sized chunks.
        lo = (step * BATCH_SIZE) % 32
        hi = lo + BATCH_SIZE
        sess.run(train_step, feed_dict={x: X[lo:hi], y_: Y[lo:hi]})

        # Every 500 steps, report the loss over the full dataset.
        if step % 500 == 0:
            total_loss = sess.run(loss, feed_dict={x: X, y_: Y})
            print('经过%d训练,loss是 %g' % (step, total_loss))
            print('w1:\n', sess.run(w1))

    # Final weights after training.
    print('\n')
    print('w1:\n', sess.run(w1))

'''
X:
 [[0.83494319 0.11482951]
 [0.66899751 0.46594987]
 [0.60181666 0.58838408]
 [0.31836656 0.20502072]
 [0.87043944 0.02679395]
 [0.41539811 0.43938369]
 [0.68635684 0.24833404]
 [0.97315228 0.68541849]
 [0.03081617 0.89479913]
 [0.24665715 0.28584862]
 [0.31375667 0.47718349]
 [0.56689254 0.77079148]
 [0.7321604  0.35828963]
 [0.15724842 0.94294584]
 [0.34933722 0.84634483]
 [0.50304053 0.81299619]
 [0.23869886 0.9895604 ]
 [0.4636501  0.32531094]
 [0.36510487 0.97365522]
 [0.73350238 0.83833013]
 [0.61810158 0.12580353]
 [0.59274817 0.18779828]
 [0.87150299 0.34679501]
 [0.25883219 0.50002932]
 [0.75690948 0.83429824]
 [0.29316649 0.05646578]
 [0.10409134 0.88235166]
 [0.06727785 0.57784761]
 [0.38492705 0.48384792]
 [0.69234428 0.19687348]
 [0.42783492 0.73416985]
 [0.09696069 0.04883936]]
Y:
 [[0.969797861054287], [1.1634604857835003], [1.1942714411690643], [0.5384488448601837], [0.8632760602061648], [0.8339321949148727], [0.9280893354024468], [1.6879345369421652], [0.9036674505700479], [0.512956535191759], [0.7844252375973886], [1.299175094270699], [1.0919817282657285], [1.0880495166868347], [1.1734589741814216], [1.3098158421478576], [1.2387201482616106], [0.8289679938936613], [1.3550486329517142], [1.578666175492443], [0.7524305484165053], [0.7326318868381031], [1.2449966435046544], [0.7880975994021049], [1.557748860733639], [0.38892569979304564], [1.0277860551407527], [0.6104042277890978], [0.8594808823356304], [0.8810757430061306], [1.145640195903311], [0.1907476486033659]]
WARNING:tensorflow:From c:\python\python37\lib\site-packages\tensorflow\python\framework\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
2019-04-19 18:28:26.461518: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
w1:
 [[-0.8113182]
 [ 1.4845988]]


经过0训练,loss是 0.655701
w1:
 [[-0.80974597]
 [ 1.4852903 ]]
经过500训练,loss是 0.35731
w1:
 [[-0.46074435]
 [ 1.641878  ]]
经过1000训练,loss是 0.232481
w1:
 [[-0.21939856]
 [ 1.6984766 ]]
经过1500训练,loss是 0.170404
w1:
 [[-0.04415595]
 [ 1.7003176 ]]
经过2000训练,loss是 0.133037
w1:
 [[0.08942621]
 [1.673328  ]]
经过2500训练,loss是 0.106939
w1:
 [[0.19583555]
 [1.6322677 ]]
经过3000训练,loss是 0.0870619
w1:
 [[0.28375748]
 [1.5854434 ]]
经过3500训练,loss是 0.0712709
w1:
 [[0.35848638]
 [1.5374472 ]]
经过4000训练,loss是 0.0584907
w1:
 [[0.42332518]
 [1.4907393 ]]
经过4500训练,loss是 0.0480653
w1:
 [[0.48040026]
 [1.4465574 ]]
经过5000训练,loss是 0.0395331
w1:
 [[0.53113604]
 [1.4054536 ]]
经过5500训练,loss是 0.0325409
w1:
 [[0.5765325]
 [1.3675941]]
经过6000训练,loss是 0.0268078
w1:
 [[0.61732584]
 [1.3329403 ]]
经过6500训练,loss是 0.0221059
w1:
 [[0.6540846]
 [1.3013426]]
经过7000训练,loss是 0.0182493
w1:
 [[0.6872685]
 [1.272602 ]]
经过7500训练,loss是 0.015086
w1:
 [[0.71725976]
 [1.2465005 ]]
经过8000训练,loss是 0.0124914
w1:
 [[0.7443861]
 [1.2228197]]
经过8500训练,loss是 0.0103631
w1:
 [[0.7689324]
 [1.2013483]]
经过9000训练,loss是 0.00861742
w1:
 [[0.79115134]
 [1.1818889 ]]
经过9500训练,loss是 0.00718553
w1:
 [[0.811267 ]
 [1.1642567]]
经过10000训练,loss是 0.006011
w1:
 [[0.8294814]
 [1.1482829]]
经过10500训练,loss是 0.00504758
w1:
 [[0.84597576]
 [1.1338125 ]]
经过11000训练,loss是 0.00425734
w1:
 [[0.8609128]
 [1.1207061]]
经过11500训练,loss是 0.00360914
w1:
 [[0.87444043]
 [1.1088346 ]]
经过12000训练,loss是 0.00307745
w1:
 [[0.88669145]
 [1.0980824 ]]
经过12500训练,loss是 0.00264134
w1:
 [[0.8977863]
 [1.0883439]]
经过13000训练,loss是 0.00228362
w1:
 [[0.9078348]
 [1.0795243]]
经过13500训练,loss是 0.00199021
w1:
 [[0.91693527]
 [1.0715363 ]]
经过14000训练,loss是 0.00174954
w1:
 [[0.92517716]
 [1.0643018 ]]
经过14500训练,loss是 0.00155213
w1:
 [[0.93264157]
 [1.0577497 ]]
经过15000训练,loss是 0.00139019
w1:
 [[0.9394023]
 [1.0518153]]
经过15500训练,loss是 0.00125737
w1:
 [[0.9455251]
 [1.0464406]]
经过16000训练,loss是 0.00114842
w1:
 [[0.95107025]
 [1.0415728 ]]
经过16500训练,loss是 0.00105905
w1:
 [[0.9560928]
 [1.037164 ]]
经过17000训练,loss是 0.000985753
w1:
 [[0.96064115]
 [1.0331714 ]]
经过17500训练,loss是 0.000925622
w1:
 [[0.96476096]
 [1.0295546 ]]
经过18000训练,loss是 0.00087631
w1:
 [[0.9684917]
 [1.0262802]]
经过18500训练,loss是 0.000835858
w1:
 [[0.9718707]
 [1.0233142]]
经过19000训练,loss是 0.000802676
w1:
 [[0.974931 ]
 [1.0206276]]
经过19500训练,loss是 0.000775461
w1:
 [[0.9777026]
 [1.0181949]]


w1:
 [[0.98019385]
 [1.0159807 ]]
'''
