from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers.core import Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import UpSampling2D
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Flatten
from keras.optimizers import SGD
from keras.datasets import mnist
import numpy as np
from PIL import Image
import argparse
import math

def generator_model():
    """Build the DCGAN generator.

    Maps a 100-dim uniform noise vector to a 28x28x1 image in [-1, 1]
    (tanh output): Dense head -> 7x7x128 feature map -> two rounds of
    2x upsampling followed by a 5x5 same-padded convolution.

    Returns:
        An uncompiled Keras ``Sequential`` model.
    """
    model = Sequential()
    # Keras 2 signature: units is positional; the legacy Keras 1
    # ``output_dim=`` keyword used elsewhere in old DCGAN code is not
    # accepted by the Keras 2 API the rest of this file targets.
    model.add(Dense(1024, input_dim=100))  # noise input has shape (batch_size, 100)
    model.add(Activation('tanh'))
    model.add(Dense(128*7*7))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((7, 7, 128), input_shape=(128*7*7,)))
    model.add(UpSampling2D(size=(2, 2)))  # 7x7 -> 14x14
    model.add(Conv2D(64, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))  # 14x14 -> 28x28
    model.add(Conv2D(1, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    return model


def discriminator_model():
    """Build the DCGAN discriminator.

    Scores a 28x28x1 image as real (1) or generated (0): two
    conv + max-pool stages, then a dense head ending in a sigmoid
    for binary classification.

    Returns:
        An uncompiled Keras ``Sequential`` model.
    """
    net = Sequential()
    net.add(Conv2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    net.add(Activation('tanh'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Conv2D(128, (5, 5)))
    net.add(Activation('tanh'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Flatten())
    net.add(Dense(1024))
    net.add(Activation('tanh'))
    net.add(Dense(1))
    # Sigmoid output: probability that the input image is real.
    net.add(Activation('sigmoid'))
    return net


def generator_containing_discriminator(g, d):
    """Stack generator ``g`` in front of discriminator ``d``.

    The discriminator is frozen (``trainable = False``) so that training
    this stacked model updates only the generator's weights.

    Returns:
        An uncompiled Keras ``Sequential`` model computing d(g(noise)).
    """
    d.trainable = False
    stacked = Sequential()
    stacked.add(g)
    stacked.add(d)
    return stacked

# 此处应该是将BATCH_SIZE张小图片 组合成 一张合成图，看效果用
def combine_images(generated_images):
    """Tile a batch of single-channel images into one near-square grid.

    Args:
        generated_images: array of shape (num, height, width, channels);
            only channel 0 of each image is used.

    Returns:
        2-D array of shape (rows*height, cols*width) holding the images
        in row-major order, same dtype as the input.
    """
    num = generated_images.shape[0]
    cols = int(math.sqrt(num))                     # grid columns ~ sqrt(num)
    rows = int(math.ceil(float(num)/cols))         # enough rows for all images
    h, w = generated_images.shape[1:3]
    canvas = np.zeros((rows*h, cols*w), dtype=generated_images.dtype)
    # Paste each image into its (row, col) cell of the canvas.
    for idx, img in enumerate(generated_images):
        r, c = divmod(idx, cols)
        canvas[r*h:(r+1)*h, c*w:(c+1)*w] = img[:, :, 0]
    return canvas


def train(BATCH_SIZE):
    """Adversarially train the DCGAN on MNIST.

    Per mini-batch, alternates one discriminator update (real images
    labelled 1, generated images labelled 0) with one generator update
    through the stacked model while the discriminator is frozen.
    Periodically saves a tiled sample image and the model weights.

    Args:
        BATCH_SIZE: number of real (and of fake) images per batch.
    """
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = (X_train.astype(np.float32) - 127.5)/127.5  # rescale pixels from [0, 255] to [-1, 1] to match the generator's tanh output
    X_train = X_train[:, :, :, None]  # add a trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1)
    X_test = X_test[:, :, :, None]
    d = discriminator_model()  # discriminator: classifies real vs. generated
    g = generator_model()  # generator: maps 100-dim noise to fake images
    d_on_g = generator_containing_discriminator(g, d)  # stacked model for training g through the frozen d
    d_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    g_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    g.compile(loss='binary_crossentropy', optimizer="SGD")  # g alone is only used via predict(); this compile just makes it callable
    d_on_g.compile(loss='binary_crossentropy', optimizer=g_optim)  # compiled while d.trainable is False, so only g learns here
    d.trainable = True  # re-enable d's weights before compiling it standalone
    d.compile(loss='binary_crossentropy', optimizer=d_optim)
    for epoch in range(100):  # fixed number of epochs
        print("***********************************************")
        print("Epoch is", epoch)
        print("Number of batches", int(X_train.shape[0]/BATCH_SIZE))
        for index in range(int(X_train.shape[0]/BATCH_SIZE)):
            # Fresh latent noise in [-1, 1), shape (BATCH_SIZE, 100).
            noise = np.random.uniform(-1, 1, size=(BATCH_SIZE, 100))
            image_batch = X_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE]  # next slice of real training images
            # Generate BATCH_SIZE fake images with the same shape as
            # image_batch.
            generated_images = g.predict(noise, verbose=0)
            # Every 20 batches, tile the fakes into one image and save it
            # for visual inspection of training progress.
            if index % 20 == 0:
                image = combine_images(generated_images)
                image = image*127.5+127.5 # undo the [-1, 1] normalisation back to [0, 255]
                Image.fromarray(image.astype(np.uint8)).save(
                    str(epoch)+"_"+str(index)+".png")
            X = np.concatenate((image_batch, generated_images))  # first half real, second half fake
            y = [1] * BATCH_SIZE + [0] * BATCH_SIZE  # labels: 1 = real, 0 = fake
            d_loss = d.train_on_batch(X, y)  # discriminator training step
            print("batch %d d_loss : %f" % (index, d_loss))
            noise = np.random.uniform(-1, 1, (BATCH_SIZE, 100))
            d.trainable = False  # freeze d for the generator step; NOTE(review): Keras honours trainable at compile time, so this per-batch toggle may be a no-op — the freezing baked into d_on_g's compile is what matters. Confirm for the Keras version in use.
            # Train the generator to make the (frozen) discriminator
            # output 1 for its fakes; the loss only drives g's weights.
            g_loss = d_on_g.train_on_batch(noise, [1] * BATCH_SIZE)
            d.trainable = True
            print("batch %d g_loss : %f" % (index, g_loss))
            if index % 10 == 9:  # checkpoint the weights every 10 batches
                g.save_weights('generator', True)
                d.save_weights('discriminator', True)

# 该方法应该是产生一个逼真的假图（即训练时生成器生成的那种！！）
def generate(BATCH_SIZE, nice=False):
    """Sample images from the trained generator and save one tiled PNG.

    Args:
        BATCH_SIZE: number of images in the saved grid.
        nice: if True, generate a 20x larger pool, score it with the
            trained discriminator, and keep only the BATCH_SIZE images
            it rates most realistic.

    Side effects:
        Loads weights from 'generator' (and 'discriminator' when
        ``nice``) and writes "generated_image.png".
    """
    g = generator_model()
    g.compile(loss='binary_crossentropy', optimizer="SGD")
    g.load_weights('generator')
    if nice:
        d = discriminator_model()
        d.compile(loss='binary_crossentropy', optimizer="SGD")
        d.load_weights('discriminator')
        # Over-sample, then rank candidates by discriminator score.
        pool_noise = np.random.uniform(-1, 1, (BATCH_SIZE*20, 100))
        candidates = g.predict(pool_noise, verbose=1)
        scores = d.predict(candidates, verbose=1)
        ids = np.arange(0, BATCH_SIZE*20)
        ids.resize((BATCH_SIZE*20, 1))
        # Pair each score with its candidate index, best score first.
        ranked = list(np.append(scores, ids, axis=1))
        ranked.sort(key=lambda row: row[0], reverse=True)
        best = np.zeros((BATCH_SIZE,) + candidates.shape[1:3], dtype=np.float32)
        best = best[:, :, :, None]
        for slot in range(BATCH_SIZE):
            src = int(ranked[slot][1])
            best[slot, :, :, 0] = candidates[src, :, :, 0]
        image = combine_images(best)
    else:
        sample_noise = np.random.uniform(-1, 1, (BATCH_SIZE, 100))
        image = combine_images(g.predict(sample_noise, verbose=1))
    image = image*127.5+127.5  # back from [-1, 1] to [0, 255]
    Image.fromarray(image.astype(np.uint8)).save(
        "generated_image.png")


def get_args():
    """Parse command-line options for this script.

    Recognised flags:
        --mode        "train" or "generate" (default "train")
        --batch_size  images per batch (default 128)
        --nice        in generate mode, keep only the discriminator's
                      top-rated samples (default off)

    Returns:
        The parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", type=str, default="train")
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--nice", dest="nice", action="store_true")
    parser.set_defaults(nice=False)
    return parser.parse_args()

if __name__ == "__main__":
    # Dispatch on --mode; any other value is silently ignored.
    cli = get_args()
    if cli.mode == "train":
        train(BATCH_SIZE=cli.batch_size)
    elif cli.mode == "generate":
        generate(BATCH_SIZE=cli.batch_size, nice=cli.nice)
