# coding: utf-8

import tensorflow as tf
import numpy as np

# Inputs: 2 binary features per example; labels: a single 0/1 target.
x = tf.placeholder(dtype=tf.float32, shape=[None, 2])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1])

# Single-neuron model parameters, initialized to zero.
w = tf.Variable(tf.zeros([2, 1]))
b = tf.Variable(tf.zeros([1]))

# Logits are the raw, unnormalized scores of the linear layer
# (shape [batch_size, 1] here). They only become probabilities after an
# activation such as sigmoid/softmax is applied.
logits = tf.matmul(x, w) + b

# Sigmoid activation: squashes logits into (0, 1) probabilities.
output = tf.nn.sigmoid(logits)

# Cross-entropy loss, computed from the *logits* rather than the sigmoid
# output — the fused op is numerically stable.
cross_entropy = tf.losses.sigmoid_cross_entropy(multi_class_labels=y, logits=logits)

train_step = tf.train.GradientDescentOptimizer(0.3).minimize(cross_entropy)

print(train_step)

# Truth table of logical OR: inputs and the corresponding target labels.
x_value = np.array(
    [[1, 1],
     [1, 0],
     [0, 1],
     [0, 0]])
y_value = np.array(
    [[1],
     [1],
     [1],
     [0]])

init_op = tf.global_variables_initializer()

# Use a context manager so the session and its resources are always
# released, even if an exception is raised mid-run (the original leaked
# the session by never closing it).
with tf.Session() as sess:
    sess.run(init_op)

    # Evaluate once before training to inspect the initial loss/predictions.
    cross_entropy_value, logits_value, output_value = sess.run(
        [cross_entropy, logits, output],
        feed_dict={x: x_value,
                   y: y_value})

    print(cross_entropy_value)
    print(logits_value)
    print(output_value)

    # 100 full-batch gradient-descent steps.
    for current_step in range(100):
        cross_entropy_value, output_value, _ = sess.run(
            [cross_entropy, output, train_step],
            feed_dict={x: x_value,
                       y: y_value})

    # Evaluate again after training, this time also fetching the learned
    # parameters.
    cross_entropy_value, logits_value, output_value, w_value, b_value = sess.run(
        [cross_entropy, logits, output, w, b],
        feed_dict={x: x_value,
                   y: y_value})

    print(">>>>>>>>>>>>>>>>>>")
    print(cross_entropy_value)
    print(logits_value)
    print(output_value)
    print(w_value)
    print(b_value)
