from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers.core import Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import UpSampling2D
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Flatten
from keras.optimizers import SGD
from keras.datasets import mnist
import numpy as np
from PIL import Image
import math
from src.core import *
from src.utils import *

# Noise vector size was changed from 100 to 10000.

class DCGAN(object):
    """A minimal DCGAN (deep convolutional GAN) for 256x256 RGB images.

    The generator maps a 10000-dim uniform noise vector to a 256x256x3
    image in [-1, 1] (tanh output); the discriminator scores an image
    with a single sigmoid unit (1 = real, 0 = fake).
    """

    def generator_model(self):
        """Build the generator: noise (batch, 10000) -> image (batch, 256, 256, 3)."""
        model = Sequential()
        # Noise vector size was raised from the classic 100 to 10000.
        model.add(Dense(input_dim=10000, output_dim=2048))
        model.add(Activation('tanh'))
        model.add(Dense(32 * 32 * 32))
        model.add(BatchNormalization())
        model.add(Activation('tanh'))
        model.add(Reshape((32, 32, 32), input_shape=(32 * 32 * 32,)))

        # Three 2x upsampling stages: 32 -> 64 -> 128 -> 256 pixels.
        model.add(UpSampling2D(size=(2, 2)))
        model.add(Conv2D(64, (5, 5), padding='same'))
        model.add(Activation('tanh'))

        model.add(UpSampling2D(size=(2, 2)))
        model.add(Conv2D(32, (5, 5), padding='same'))
        model.add(Activation('tanh'))

        model.add(UpSampling2D(size=(2, 2)))
        model.add(Conv2D(3, (5, 5), padding='same'))
        model.add(Activation('tanh'))  # output pixels in [-1, 1]
        return model

    def discriminator_model(self):
        """Build the discriminator: image (256, 256, 3) -> real/fake probability."""
        model = Sequential()
        model.add(
                Conv2D(8, (5, 5),
                padding='same',
                input_shape=(256, 256, 3))
                )
        model.add(Activation('tanh'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(32, (5, 5)))
        model.add(Activation('tanh'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(64, (5, 5)))
        model.add(Activation('tanh'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(2048))
        model.add(Activation('tanh'))

        model.add(Dense(1024))
        model.add(Activation('tanh'))

        model.add(Dense(1))
        # Sigmoid output for binary real-vs-fake classification.
        model.add(Activation('sigmoid'))
        return model

    def generator_containing_discriminator(self, g, d):
        """Stack generator then discriminator (frozen) for generator training."""
        model = Sequential()
        model.add(g)
        # Freeze the discriminator inside the combined model so that
        # training it only updates the generator's weights.
        d.trainable = False
        model.add(d)
        return model

    # Tile a batch of single-channel images into one grid image.
    # (Method name keeps its historical typo for backward compatibility.)
    def combine_binaey_images(self, generated_images):
        """generated_images: (num, h, w, c) array; only channel 0 is used."""
        num = generated_images.shape[0]
        width = int(math.sqrt(num))                   # grid columns ~ sqrt(num)
        height = int(math.ceil(float(num) / width))   # enough rows for all tiles
        shape = generated_images.shape[1:3]           # (h, w) of one tile
        image = np.zeros((height * shape[0], width * shape[1]),
                         dtype=generated_images.dtype)
        for index, img in enumerate(generated_images):
            i = int(index / width)
            j = index % width
            image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1]] = \
                img[:, :, 0]
        return image

    # Tile a batch of RGB images into one grid image.
    def combine_RGB_images(self, generated_images):
        """generated_images: (num, h, w, 3) array -> (rows*h, cols*w, 3) grid."""
        num = generated_images.shape[0]
        width = int(math.sqrt(num))                   # grid columns ~ sqrt(num)
        height = int(math.ceil(float(num) / width))   # enough rows for all tiles
        shape = generated_images.shape[1:3]           # (h, w) of one tile
        image = np.zeros((height * shape[0], width * shape[1], 3),
                         dtype=generated_images.dtype)
        for index, img in enumerate(generated_images):
            i = int(index / width)
            j = index % width
            # Copy all three channels at once (equivalent to the previous
            # per-channel assignments).
            image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1], :] = img
        return image

    def train(self, filename, batch_size, save_result_path, epochs = 100, combined_batch_index = 20, epoch_index = 5):
        """Adversarial training loop.

        filename: directory of real training images, read via Data.read_image.
        batch_size: images per half-batch (real and fake halves each this size).
        save_result_path: root directory for preview grids and weight files.
        epochs: number of passes over the data.
        combined_batch_index: save a preview grid every this many batches.
        epoch_index: save generator/discriminator weights every this many epochs.
        """
        d = self.discriminator_model()
        g = self.generator_model()
        d_on_g = self.generator_containing_discriminator(g, d)
        d_optim = SGD(lr=0.00001, momentum=0.9, nesterov=True)
        g_optim = SGD(lr=0.00001, momentum=0.9, nesterov=True)
        g.compile(loss='binary_crossentropy', optimizer="SGD")
        d_on_g.compile(loss='binary_crossentropy', optimizer=g_optim)
        # Re-enable discriminator training (the combined model froze it).
        d.trainable = True
        d.compile(loss='binary_crossentropy', optimizer=d_optim)

        data = Data()

        for epoch in range(epochs):
            print("***********************************************")
            print("Epoch is", epoch)

            for index, imgs in data.read_image(filename, batch_size):
                np_imgs = np.asarray(imgs, np.float32)
                # Scale pixels from [0, 255] to [-1, 1] (tanh range).
                trained_imgs = (np_imgs.astype(np.float32) - 127.5) / 127.5
                noise = np.random.uniform(-1, 1, size=(batch_size, 10000))
                generated_images = g.predict(noise, verbose=0)
                # Periodically save a grid of generated images so training
                # progress can be inspected visually.
                if index % combined_batch_index == 0:
                    image = self.combine_RGB_images(generated_images)
                    image = image * 127.5 + 127.5
                    Image.fromarray(image.astype(np.uint8)).save( save_result_path + r"\trained_result\temp" + "//" +
                        str(epoch) + "_epoch_" + str(index) + "_batch" + ".png")
                # Discriminator batch: first half real (label 1), second half fake (label 0).
                X = np.concatenate((trained_imgs, generated_images))
                y = [1] * batch_size + [0] * batch_size
                d_loss = d.train_on_batch(X, y)
                print("batch %d d_loss : %f" % (index, d_loss))
                noise = np.random.uniform(-1, 1, (batch_size, 10000))
                # Freeze the discriminator so only the generator's weights
                # move, then push the stacked model towards "real" (1) labels.
                d.trainable = False
                g_loss = d_on_g.train_on_batch(noise, [1] * batch_size)
                d.trainable = True
                print("batch %d g_loss : %f" % (index, g_loss))
            if epoch % epoch_index == 0:
                g.save_weights(save_result_path + r"\weights\temp" + "//" + str(epoch) + "_epoch_" + 'generator' , True)
                d.save_weights(save_result_path + r"\weights\temp" + "//" + str(epoch) + "_epoch_" + 'discriminator' , True)

    def generate(self, batch_size, g_model_path, d_model_path, result_path, nice=False):
        """Generate batch_size fake images with a trained generator and save them.

        If nice=True, over-generate (batch_size * 20), score every image with
        the trained discriminator, and keep only the batch_size highest-scoring
        ones before saving.
        """
        g = self.generator_model()
        g.compile(loss='binary_crossentropy', optimizer="SGD")
        g.load_weights(g_model_path)

        if nice:
            d = self.discriminator_model()
            d.compile(loss='binary_crossentropy', optimizer="SGD")
            d.load_weights(d_model_path)
            # Bug fix: noise dim must match the generator's input (10000, not 100).
            noise = np.random.uniform(-1, 1, (batch_size * 20, 10000))
            generated_images = g.predict(noise, verbose=1)
            d_pret = d.predict(generated_images, verbose=1)
            # Pair each discriminator score with its image index, then sort
            # descending by score.
            index = np.arange(0, batch_size * 20)
            index.resize((batch_size * 20, 1))
            pre_with_index = list(np.append(d_pret, index, axis=1))
            pre_with_index.sort(key=lambda x: x[0], reverse=True)
            # Bug fix: keep all channels and route the selection through the
            # same save path as the non-nice branch (previously `imgs` was
            # unbound here and the code called a nonexistent self.combine_images).
            nice_images = np.zeros((batch_size,) + generated_images.shape[1:], dtype=np.float32)
            for i in range(batch_size):
                idx = int(pre_with_index[i][1])
                nice_images[i] = generated_images[idx]
            imgs = nice_images
        else:
            noise = np.random.uniform(-1, 1, (batch_size, 10000))
            generated_images = g.predict(noise, verbose=1)
            imgs = generated_images
        # De-normalize from [-1, 1] back to [0, 255].
        imgs = imgs * 127.5 + 127.5
        data = Data()
        order = 1
        for img in imgs:
            data.save_image(img, result_path + "//" + r"generated_result\temp", str(order), "RGB")
            order += 1

if __name__ == '__main__':
    # Training configuration (paths are Windows-specific).
    batch_size = 50
    epochs = 800
    filename = r"H:\wangjianlian\data\formal_data\HER2\thrid_generation\val\20X\g1"
    save_result_path = r"..\..\result"

    gan = DCGAN()
    gan.train(filename, batch_size, save_result_path, epochs, combined_batch_index=5)