
import os
import sys
sys.path.append("..")

import tensorflow as tf

import mnist_data

# Demo: reshape a flat 0..23 sequence into 2-D and 3-D tensors and
# dump both with tf.Print (printing happens when the op is evaluated).
values = list(xrange(24))
mat = tf.reshape(values, [-1, 2])       # 12 rows of 2
cube = tf.reshape(mat, [-1, 2, 3])      # 4 blocks of 2x3
print_cube = tf.Print(cube, [cube], summarize=24, message="ooo")
print_mat = tf.Print(mat, [mat], summarize=24)
with tf.Session() as sess:
	# Evaluate the 2-D dump first, then the 3-D one (same order as before).
	print_mat.eval()
	print_cube.eval()

x1, y = mnist_data.train.next(1)
x2, y = mnist_data.train.next(1)
print x1
print x2
# k = []
# for l1 in x:
# 	pl1 = []
# 	for l2 in l1:
# 		pl2 = []
# 		for l3 in l2:
# 			pl3 = []
# 			for val in l3:
# 				_val = int(val * 255)
# 				pl3.append(_val)
# 				# print _val, 
# 			pl2.append(pl3)
# 		pl1.append(pl2)
# 	k.append(pl1)
print ""
b1 = tf.reshape(x1, [-1, 784])
b2 = tf.reshape(x2, [-1, 784])

with tf.Session() as sess:
	tf.Print(b1, [b1], summarize=784).eval()
	tf.Print(b2, [b2], summarize=784).eval()

# Single-layer softmax regression over flattened 28x28 MNIST images.
x = tf.placeholder("float", [None, 784])   # input batch, one 784-vector per image
W = tf.Variable(tf.zeros([784, 10]))       # pixel -> class weights
b = tf.Variable(tf.zeros([10]))            # per-class bias

y_ = tf.placeholder("float", [None, 10])   # one-hot ground-truth labels
y = tf.nn.softmax(tf.matmul(x, W) + b)     # predicted class distribution
# Clip the probabilities away from 0 before taking the log: raw
# -sum(y_ * log(y)) yields -inf/NaN as soon as any softmax output
# underflows to 0, which poisons the gradients during training.
cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))

train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

# Fetch one training example and evaluate the cross-entropy loss on it.
sample_xs, sample_ys = mnist_data.train.next(1)

with tf.Session() as sess:
	tf.initialize_all_variables().run()
	loss = cross_entropy.eval(feed_dict={x: sample_xs, y_: sample_ys})
	# Wrap the evaluated loss in a tf.Print op so it is dumped the same
	# way as the tensors above.
	tf.Print(loss, [loss], summarize=784).eval()
