from PIL import Image
import matplotlib.pyplot as plt
import csv
import numpy as np
import tensorflow as tf
import model

# Read the sample-submission CSV; first column is the image id, second the label
# we will fill in with the model's prediction.
csvFile = open("sampleSubmission.csv", "r")
reader = csv.reader(csvFile)  # iterator over rows

data = []
test_dir = 'F:\\001-python\\test_1\\'
train_logs_dir = 'F:\\001-python\\logs\\train\\'

# Build the inference graph ONCE, with a placeholder as its input.
# The original code rebuilt the whole graph and re-restored the checkpoint
# for every single image, and its placeholder `x` was never actually wired
# into the graph (logit was computed from a per-image constant), so the
# feed_dict had no effect. Feeding through the placeholder fixes both.
with tf.Graph().as_default():
    BATCH_SIZE = 1
    N_CLASSES = 2

    # Input: one 208x208 RGB image, fed per iteration via feed_dict.
    x = tf.placeholder(tf.float32, shape=[208, 208, 3])
    image = tf.image.per_image_standardization(x)
    image = tf.reshape(image, [1, 208, 208, 3])
    logit = model.inference(image, BATCH_SIZE, N_CLASSES)
    logit = tf.nn.softmax(logit)

    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
        # Restore the trained weights once, before the prediction loop.
        ckpt = tf.train.get_checkpoint_state(train_logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success, global_step is %s' % global_step)
        else:
            print('No checkpoint file found')

        for item in reader:
            if item[0] != 'id':  # skip the header row
                file = test_dir + item[0] + ".jpg"
                image_raw = Image.open(file)
                # convert('RGB') guards against grayscale/RGBA JPEGs that
                # would otherwise break the [208, 208, 3] placeholder shape.
                image_raw = image_raw.convert('RGB').resize([208, 208])
                image_arr = np.array(image_raw)

                # NOTE: the original called plt.imshow() here for every image
                # without plt.show(); that is dead work in a batch-prediction
                # loop, so it was dropped.
                prediction = sess.run(logit, feed_dict={x: image_arr})
                max_index = np.argmax(prediction)  # predicted class index (0 or 1)
                item[1] = max_index

                data.append(item)
                print(item)

print(data)
# Write predictions back out. newline='' prevents the blank interleaved
# rows csv.writer otherwise produces on Windows text-mode files.
with open("sampleSubmission_1.csv", "w", newline='') as csvFile2:
    writer = csv.writer(csvFile2)
    writer.writerows(data)

csvFile.close()
