import tensorflow as tf
import numpy as np
import os
from skimage import io,transform
import time



def inference(input_tensor):
    """Build a small CNN mapping a batch of 100x100 RGB images to 2-class logits.

    Args:
        input_tensor: float32 tensor of shape [None, 100, 100, 3].

    Returns:
        A [None, 2] tensor of raw (un-activated) class logits, suitable for
        feeding straight into tf.nn.softmax_cross_entropy_with_logits.
    """
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable("weight", [5, 5, 3, 32],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [32], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
        norm1 = tf.nn.lrn(pool1, depth_radius=5, bias=2.0, alpha=1e-3, beta=0.75, name='norm1')

    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable("weight", [3, 3, 32, 64],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [64], initializer=tf.constant_initializer(0.0))
        # Stride 2 halves the spatial resolution here in place of a pooling layer.
        conv2 = tf.nn.conv2d(norm1, conv2_weights, strides=[1, 2, 2, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    with tf.variable_scope("layer4-conv3"):
        conv3_weights = tf.get_variable("weight", [3, 3, 64, 128],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv3_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv3 = tf.nn.conv2d(relu2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))

    with tf.name_scope("layer5-pool2"):
        pool2 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
        norm2 = tf.nn.lrn(pool2, depth_radius=5, bias=2.0, alpha=1e-3, beta=0.75, name='norm2')

    with tf.variable_scope("fc1"):
        flat = tf.layers.flatten(norm2)
        # BUG FIX: the final layer previously used activation=tf.nn.sigmoid,
        # but the caller passes this tensor to
        # tf.nn.softmax_cross_entropy_with_logits, which expects raw logits.
        # Squashing through a sigmoid first destroys the logit scale and
        # cripples training; emit the linear outputs instead.
        logits = tf.layers.dense(flat, 2, activation=None)
    return logits

def get_filelist(sad_dir='D:/dataset/emotion/sad', happy_dir='D:/dataset/emotion/happy'):
    """Collect image file paths and one-hot labels for the two emotion classes.

    Args:
        sad_dir: directory walked for "sad" samples, labelled [0, 1].
        happy_dir: directory walked for "happy" samples, labelled [1, 0].

    Returns:
        (images, labels): a numpy array of file-path strings and a matching
        int32 array of one-hot label rows. Both are empty when the
        directories do not exist (os.walk then yields nothing).
    """
    images = []
    labels = []
    for label, top in (([0, 1], sad_dir), ([1, 0], happy_dir)):
        for root, _dirs, files in os.walk(top):
            for name in files:
                # BUG FIX: join against the directory actually being walked so
                # files found in nested sub-directories get a correct path
                # (the old code always prefixed the top-level directory).
                images.append(os.path.join(root, name))
                labels.append(label)
    return np.asarray(images), np.asarray(labels, np.int32)


def get_data(file_list, index, batch_size, label_list):
    """Load one mini-batch of images (resized to 100x100) and their labels.

    The batch window wraps around the end of `file_list`, so any
    non-negative `index` is valid.

    Args:
        file_list: sequence of image file paths.
        index: zero-based batch number.
        batch_size: number of samples per batch.
        label_list: labels aligned index-for-index with `file_list`.

    Returns:
        (images, labels) as float32 / int32 numpy arrays.
    """
    total = len(file_list)
    start = index * batch_size
    batch_imgs = []
    batch_labels = []
    for offset in range(start, start + batch_size):
        pos = offset % total  # wrap past the end of the dataset
        batch_imgs.append(transform.resize(io.imread(file_list[pos]), (100, 100)))
        batch_labels.append(label_list[pos])
    return np.asarray(batch_imgs, np.float32), np.asarray(batch_labels, np.int32)

# Graph inputs: batches of 100x100 RGB images and one-hot 2-class labels.
x = tf.placeholder(tf.float32, shape=[None, 100, 100, 3], name='x')
y_ = tf.placeholder(tf.float32, shape=[None, 2], name='y_')

logits = inference(x)
cross_loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_)
loss = tf.reduce_mean(cross_loss)

tf.add_to_collection('losses', loss)
# Overall learning rate alpha = 0.001.
train_vars = tf.trainable_variables()

train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss, var_list=train_vars)

# NOTE(review): the original bare `tf.device('/gpu:0')` call was a no-op —
# device placement only applies when used as a context manager around graph
# construction (`with tf.device('/gpu:0'): ...`), and the graph is already
# built by this point, so the call has been removed.
print("training start")
n_epoch = 10000
batch_size = 50
x_train, y_train = get_filelist()

# BUG FIX: the accuracy op previously compared the predicted class index
# (shape [batch]) against the one-hot label matrix (shape [batch, 2]); the
# broadcast yields a meaningless [batch, 2] comparison. Compare the argmax
# of predictions and labels instead.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


# BUG FIX: `saver` was never created, so the save call below raised a
# NameError after the full training run. Build it once the whole graph
# (including the optimizer's slot variables) exists.
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(n_epoch):
        # Each step consumes one wrapping mini-batch of `batch_size` images.
        feed_img, feed_label = get_data(x_train, epoch, batch_size, y_train)
        _, err, ac = sess.run([train_op, loss, acc],
                              feed_dict={x: feed_img, y_: feed_label})

        if epoch % 100 == 0:
            print("epoch %d, loss: %.2f, ac : %.2f" % (epoch, err, ac))
    # Persist the trained weights so the (commented-out) inference section
    # below can restore them from a checkpoint.
    saver.save(sess, "./happy_and_sad.model", global_step=10000)

# prediction = tf.cast(tf.argmax(logits, 1), tf.float32)

# with tf.Session() as sess:
#     saver.restore(sess, tf.train.latest_checkpoint('.'))
#     cat_path = "D:/images/train_data/test_img/0/"
#     dog_path = "D:/images/train_data/test_img/1/"
#     dogs = os.listdir(dog_path)
#     cats = os.listdir(cat_path)
#     count = 0
#     for f in dogs:
#         if os.path.isfile(os.path.  join(dog_path, f)):
#             image = io.imread(os.path.join(dog_path, f))
#             copy = np.copy(image)
#             image = transform.resize(image, (100, 100))
#             image = np.float32(image)
#             image_tensor = np.expand_dims(image, 0)
#             digit = sess.run(prediction, feed_dict={x: image_tensor})
#             print("predict digit : %d., actual digit : %s"%(digit[0], 0))
#             if digit[0] == 0:
#                 count = count + 1
#                 cv.putText(copy, "dog", (20, 50), cv.FONT_HERSHEY_SCRIPT_SIMPLEX, 1.0, (0, 0, 255), 2, 8)
#                 cv.imshow("Image Classification", copy)
#                 cv.waitKey(0)
#     for f in cats:
#         if os.path.isfile(os.path.join(cat_path, f)):
#             image = io.imread(os.path.join(cat_path, f))
#             copy = np.copy(image)
#             image = transform.resize(image, (100, 100))
#             image = np.float32(image)
#             image_tensor = np.expand_dims(image, 0)
#             digit = sess.run(prediction, feed_dict={x: image_tensor})
#             print("predict digit : %d., actual digit : %s"%(digit[0], 1))
#             if digit[0] == 1:
#                 count = count + 1
#                 cv.putText(copy, "cat", (20, 50), cv.FONT_HERSHEY_SCRIPT_SIMPLEX, 1.0, (0, 0, 255), 2, 8)
#                 cv.imshow("Image Classification", copy)
#                 cv.waitKey(0)
#     print("correct precent: %f"%(count/(len(cats)+len(dogs))))