

import tensorflow as tf
import csv

# Read prediction rows from a CSV file via the TF1 queue-based input pipeline.
filename_queue = tf.train.string_input_producer(["./prediction_data.csv"])

# TextLineReader yields one CSV line per read; `key` is a file/line id here,
# overwritten below with the record-id column.
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)

# Default values, in case of empty columns. Also specifies the type of the
# decoded result.
record_defaults = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]

col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22 = tf.decode_csv(value, record_defaults=record_defaults)

#print("col1: %s, col2: %s, col3: %s, col4: %s, col5: %s, col6: %s, col7: %s, col8: %s, col9: %s, col10: %s, col11: %s" % 
#	(col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11))

# col1 is treated as the record identifier; col2 is unused.
key = col1
# 18 feature columns (col3..col20), matching n_input below.
features = [col3, col4, col5, col6, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20]
# Last two columns form the label; unused at inference time in this script.
judgeLabel = [col21, col22]

# Network dimensions: three hidden layers of 18 units, 18 inputs, 2 classes.
n_hidden_1 = 18
n_hidden_2 = 18
n_hidden_3 = 18
n_input = 18
n_classes = 2

# x: input batch placeholder; y_: one-hot label placeholder (only needed by
# the commented-out accuracy check at the bottom of the file).
x = tf.placeholder(tf.float32, [None, n_input])
y_ = tf.placeholder(tf.float32, [None, n_classes])

def multilayer_perceptron(x, weights, biases):
	"""Forward pass of a 3-hidden-layer MLP.

	Applies relu(a @ W + b) through hidden layers h1..h3, then a final
	linear projection to the output logits (no activation on the output).
	"""
	activation = x
	for w_key, b_key in (('h1', 'b1'), ('h2', 'b2'), ('h3', 'b3')):
		activation = tf.nn.relu(tf.add(tf.matmul(activation, weights[w_key]), biases[b_key]))
	# Output layer stays linear so the caller can apply argmax/softmax.
	return tf.matmul(activation, weights['out']) + biases['out']


# Weight matrices for each layer, drawn from a standard normal distribution.
# Shapes chain n_input -> n_hidden_1 -> n_hidden_2 -> n_hidden_3 -> n_classes.
weights = {
	'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
	'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
	'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
	'out': tf.Variable(tf.random_normal([n_hidden_3, n_classes]))
}

# Bias vectors, one per layer; these (and weights) are overwritten by the
# checkpoint restored in the session below.
biases = {
	'b1': tf.Variable(tf.random_normal([n_hidden_1])),
	'b2': tf.Variable(tf.random_normal([n_hidden_2])),
	'b3': tf.Variable(tf.random_normal([n_hidden_3])),
	'out': tf.Variable(tf.random_normal([n_classes]))
}

# Output logits of the network for the input placeholder x.
y = multilayer_perceptron(x, weights, biases)


saver = tf.train.Saver()

# Number of data rows expected in prediction_data.csv.
# NOTE(review): hard-coded row count — verify against the actual file size.
NUM_ROWS = 88070

# Inference: restore the trained checkpoint, pull every row out of the input
# queue, run the network on the whole batch in one eval, and write
# (chanceId, predicted class index) pairs to a CSV file.
with tf.Session() as sess:

	sess.run(tf.global_variables_initializer())
	saver.restore(sess, "Model/haixue_new_3_model.ckpt")
	coord = tf.train.Coordinator()
	threads = tf.train.start_queue_runners(coord=coord)
	try:
		# newline='' keeps the csv module from emitting blank rows on Windows
		# (Python 3 csv convention).
		with open("test_new_with_module_3.csv", "w+", newline='') as csvfile:
			csv_writer = csv.writer(csvfile)

			chanceId_set = []
			data_set = []
			for i in range(NUM_ROWS):
				chanceId, example, label = sess.run([key, features, judgeLabel])
				chanceId_set.append(chanceId)
				data_set.append(example)

			# Predicted class index = argmax over the two output logits.
			result = tf.argmax(y, 1)
			predictResult = result.eval(feed_dict={x: data_set})

			for i in range(NUM_ROWS):
				row = [chanceId_set[i], predictResult[i]]
				print("index: %s, list: %s" % (i, row))
				csv_writer.writerow(row)
	finally:
		# Shut down the queue-runner threads cleanly; the original never
		# stopped or joined them, which leaks threads and can hang at exit.
		coord.request_stop()
		coord.join(threads)
		

# Test trained model
#	for i in range(1500, 2000):
#		example2, label2 = sess.run([features, judgeLabel])
#		#print(example2)
#		#print(label2)
#		correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
#		accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#		print(sess.run(accuracy, feed_dict={x: [example2], y_: [label2]}))




