# -*- coding: utf-8 -*-

# 系统模块
import tensorflow as tf
import numpy as np
import os
import cv2

# 项目模块
import dnn_tools


# Directory scanned recursively for training images.
picture_dir = './datasets/'

# Collect every .jpg path under picture_dir.
# NOTE(review): everything below loads the full dataset into memory at once;
# for a large dataset, loading per-batch inside the training loop saves RAM.
picture_list = [
    os.path.join(dir_path, filename)
    for dir_path, _dir_names, file_names in os.walk(picture_dir)
    for filename in file_names
    if filename.endswith('.jpg')
]

print("图像总数: ", len(picture_list))

# Image geometry expected by the network input.
image_height = 64
image_width = 64
image_channel = 3

# Mini-batch size and the number of complete batches per epoch.
batch_size = 4
n_batches = len(picture_list) // batch_size

# Load every image (cv2.imread yields BGR uint8) and scale pixel values
# from [0, 255] down to [0.0, 1.0].
img_data = np.array([cv2.imread(img_file) for img_file in picture_list]) / 255.0
# print(img_data.shape)   # (44112, 64, 64, 3)

# Graph input: a batch of normalized images, NHWC.
X = tf.placeholder(tf.float32, shape=[None, image_height, image_width, image_channel])


def gated_cnn(w_shape_, fan_in, gated=True, payload=None, mask=None, activation=True):
    """Masked (optionally gated) convolution layer for the PixelCNN stacks.

    Args:
        w_shape_: [filter_h, filter_w, out_channels]; the input channel count
            is taken from fan_in's last dimension.
        fan_in: input tensor, NHWC.
        gated: when True, produce the gated activation
            tanh(conv_f + b_f) * sigmoid(conv_g + b_g); otherwise a single
            convolution.
        payload: optional tensor added to both gate pre-activations (used to
            feed the vertical stack into the horizontal stack).
        mask: None, 'a' or 'b' — causal filter mask; 'a' additionally zeroes
            the centre tap so the current pixel is not seen.
        activation: only consulted when gated=False; apply ReLU when True.

    Returns:
        Output tensor with the same spatial size as fan_in (stride 1, SAME).
    """
    w_shape = [w_shape_[0], w_shape_[1], fan_in.get_shape()[-1], w_shape_[2]]
    b_shape = w_shape_[2]

    def make_masked_weight(shape, name, mask_in=None):
        # Xavier-initialized filter, multiplied by a causal mask on request.
        initializer = tf.contrib.layers.xavier_initializer()
        weight = tf.get_variable(name, shape, tf.float32, initializer)

        if mask_in:
            mid_x, mid_y = shape[0] // 2, shape[1] // 2
            causal = np.ones(shape, dtype=np.float32)
            # Zero everything to the right of centre on the centre row...
            causal[mid_x, mid_y + 1:, :, :] = 0.
            # ...and every row below the centre row.
            causal[mid_x + 1:, :, :, :] = 0.
            if mask_in == 'a':
                # Mask 'a' (first layer) also excludes the current pixel.
                causal[mid_x, mid_y, :, :] = 0.
            weight *= causal
        return weight

    if not gated:
        w = make_masked_weight(w_shape, "W", mask_in=mask)
        b = tf.get_variable("b", b_shape, tf.float32, tf.zeros_initializer())
        conv = tf.nn.conv2d(fan_in, w, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.add(conv, b)
        return tf.nn.relu(pre_activation) if activation else pre_activation

    w_f = make_masked_weight(w_shape, "v_W", mask_in=mask)
    w_g = make_masked_weight(w_shape, "h_W", mask_in=mask)
    b_f = tf.get_variable("v_b", b_shape, tf.float32, tf.zeros_initializer())
    b_g = tf.get_variable("h_b", b_shape, tf.float32, tf.zeros_initializer())

    conv_f = tf.nn.conv2d(fan_in, w_f, strides=[1, 1, 1, 1], padding='SAME')
    conv_g = tf.nn.conv2d(fan_in, w_g, strides=[1, 1, 1, 1], padding='SAME')
    if payload is not None:
        conv_f += payload
        conv_g += payload

    return tf.multiply(tf.tanh(conv_f + b_f), tf.sigmoid(conv_g + b_g))


def pixel_cnn(layers=12, f_map=32):
    """Build the Gated PixelCNN graph on the global placeholder X.

    Args:
        layers: number of gated vertical/horizontal layer groups.
        f_map: feature maps per intermediate convolution.

    Returns:
        Logits reshaped to (-1, 256): one 256-way distribution per
        pixel-channel value.
    """
    v_stack_in = X
    h_stack_in = X
    # Summarize the raw input images.
    tf.summary.image('images', X)

    for layer_idx in range(layers):
        first_layer = layer_idx == 0
        # Layer 0: 7x7 filter, mask 'a' (centre tap excluded), no residual.
        # Later layers: 3x3 filter, mask 'b', residual on the h-stack.
        filter_size = 7 if first_layer else 3
        mask = 'a' if first_layer else 'b'
        residual = not first_layer
        tag = str(layer_idx)

        with tf.variable_scope("v_stack" + tag):
            v_stack_in = gated_cnn([filter_size, filter_size, f_map], v_stack_in, mask=mask)
            dnn_tools.activation_summary(v_stack_in)

        with tf.variable_scope("v_stack_1" + tag):
            v_stack_1 = gated_cnn([1, 1, f_map], v_stack_in, gated=False, mask=mask)
            dnn_tools.activation_summary(v_stack_1)

        with tf.variable_scope("h_stack" + tag):
            # The horizontal stack receives the vertical stack as payload.
            h_stack = gated_cnn([1, filter_size, f_map], h_stack_in, payload=v_stack_1, mask=mask)
            dnn_tools.activation_summary(h_stack)

        with tf.variable_scope("h_stack_1" + tag):
            h_stack_1 = gated_cnn([1, 1, f_map], h_stack, gated=False, mask=mask)
            if residual:
                h_stack_1 += h_stack_in  # residual skip on the h-stack
            h_stack_in = h_stack_1
            dnn_tools.activation_summary(h_stack_in)

    with tf.variable_scope("fc_1"):
        fc1 = gated_cnn([1, 1, f_map], h_stack_in, gated=False, mask='b')
        dnn_tools.activation_summary(fc1)

    color = 256
    with tf.variable_scope("fc_2"):
        fc2 = gated_cnn([1, 1, image_channel * color], fc1, gated=False, mask='b', activation=False)
        fc2 = tf.reshape(fc2, (-1, color))
        dnn_tools.activation_summary(fc2)

    return fc2


def train_pixel_cnn():
    """Train the PixelCNN on the globally loaded, [0, 1]-normalized images.

    Each pixel channel is a 256-class classification target; the loss is the
    mean sparse softmax cross-entropy over all pixel channels. Checkpoints go
    to ./model/, TensorBoard summaries to ./train/.
    """
    output = pixel_cnn()

    # BUG FIX: X holds pixels normalized to [0.0, 1.0] (img_data / 255.0),
    # so casting it straight to int32 collapses nearly every label to 0.
    # Recover the integer pixel value (0-255) before the cast; tf.round
    # guards against float truncation (e.g. 254.9999 -> 254).
    labels = tf.cast(tf.round(tf.reshape(X, [-1]) * 255.0), dtype=tf.int32)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, labels=labels))
    tf.add_to_collection('losses', loss)
    total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    # Loss summaries.
    dnn_tools.add_loss_summaries(total_loss=total_loss)

    # RMSProp with element-wise gradient clipping to [-1, 1] for stability.
    # Skip clipping for variables with no gradient (consistent with the
    # None check in the histogram loop below).
    trainer = tf.train.RMSPropOptimizer(1e-3)
    gradients = trainer.compute_gradients(loss)
    clipped_gradients = [
        (tf.clip_by_value(grad, -1, 1) if grad is not None else grad, var)
        for grad, var in gradients
    ]
    optimizer = trainer.apply_gradients(clipped_gradients)

    # Histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)

    # Histograms for (unclipped) gradients.
    for grad, var in gradients:
        if grad is not None:
            tf.summary.histogram(var.op.name + '/gradients', grad)

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        # Merge every summary registered above into one op.
        summary_op = tf.summary.merge_all()
        saver = tf.train.Saver(tf.trainable_variables())

        sess.run(init)
        summary_writer = tf.summary.FileWriter('./train/', sess.graph)

        for epoch in range(1):
            batch_X = None  # guard: stays None when n_batches == 0
            for batch in range(n_batches):
                batch_X = img_data[batch_size * batch: batch_size * (batch + 1)]
                _, cost = sess.run([optimizer, loss], feed_dict={X: batch_X})
                print("epoch:", epoch, '  batch:', batch, '  cost:', cost)

            if epoch % 7 == 0:
                saver.save(sess, "./model/pixel_cnn.ckpt", global_step=epoch)

            # NOTE(review): with range(1) this never fires (0 % 4 != 1);
            # cadence kept as-is for longer runs.
            if epoch % 4 == 1 and batch_X is not None:
                summary_str = sess.run(summary_op, feed_dict={X: batch_X})
                summary_writer.add_summary(summary_str, epoch)

        summary_writer.close()


def generate_picture():
    """Sample one image pixel-by-pixel from a trained PixelCNN checkpoint.

    Restores ./model/pixel_cnn.ckpt-0 and generates a single image in raster
    order (row, column, channel), writing the result to girl.jpg.
    """
    output = pixel_cnn()

    # BUG FIX: tf.multinomial expects unnormalized log-probabilities (logits).
    # Feeding tf.nn.softmax(output) made it re-interpret probabilities as
    # logits, flattening the sampling distribution toward uniform — sample
    # from the raw logits instead.
    sampled = tf.multinomial(output, num_samples=1, seed=100)
    predict = tf.reshape(sampled, tf.shape(X))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(tf.trainable_variables())
        saver.restore(sess, './model/pixel_cnn.ckpt-0')

        pics = np.zeros((1 * 1, image_height, image_width, image_channel), dtype=np.float32)

        # Autoregressive generation: each pixel channel is sampled
        # conditioned on everything already generated before it.
        for i in range(image_height):
            for j in range(image_width):
                for k in range(image_channel):
                    next_pic = sess.run(predict, feed_dict={X: pics})
                    # BUG FIX: the net was trained on [0, 1] inputs
                    # (pixels / 255.0), so normalize the sampled 0-255
                    # class index before feeding it back through X.
                    pics[:, i, j, k] = next_pic[:, i, j, k] / 255.0

        # Scale back up to 0-255 for writing to disk.
        cv2.imwrite('girl.jpg', pics[0] * 255.0)
        print('生成妹子图: girl.jpg')


if __name__ == '__main__':
    # Train the model
    train_pixel_cnn()

    # Generate an image (enable after training has produced a checkpoint)
    # generate_picture()