from __future__ import division
import os
import time
import math
import tensorflow as tf
import numpy as np
import scipy.io as sio
from utils import SeismicData
from ops import conv3d, deconv3d, dilated_conv3d, lrelu, linear

class TGLCGAN(object):
    '''
    TGLCGAN: Tensor Globally and Locally Consistent Completion GAN.

    Completes (inpaints) subsampled 3D volumes; an extension of the paper
    "Globally and Locally Consistent Image Completion" from 2D images to
    3D tensors.
    '''

    def __init__(self, sess, batch_size=64, sample_shape=[32, 32, 64], gfc_channels=256,
                 dfc_channels=256, sampling_rate=0.7, lam=0.1, dataset_name='seis_1', data_name='T',
                 checkpoint_dir='./checkpoint', mask_type='random_trace'):
        '''
        Args:
            sess: TensorFlow session used to run the graph.
            batch_size: The size of a training batch.
            sample_shape: Shape of a single sample, [height, width, depth];
                          generated data has the same shape.
            gfc_channels: Channels of the generator fully connected layer
                          (currently unused by the network definition).
            dfc_channels: Channels of the discriminator fully connected layer
                          (currently unused by the network definition).
            sampling_rate: Fraction of data kept by the subsampling mask.
            lam: Tradeoff between the adversarial loss and the contextual (L1) loss.
            dataset_name: Directory name of the dataset under './data'.
                          [Attention] The data format must be '.mat'.
            data_name: Name of the variable inside the .mat file, not the file name.
            checkpoint_dir: Directory used to save/restore model checkpoints.
            mask_type: Type of mask used to subsample the 3D data, one of
                       ['block', 'random_element', 'random_trace'].
        '''
        self.sess = sess
        self.batch_size = batch_size
        # Copy so the shared mutable default (or a caller's list) is never aliased.
        self.sample_shape = list(sample_shape)
        self.gfc_channels = gfc_channels
        self.dfc_channels = dfc_channels

        self.lam = lam

        # BUG FIX: the original only assigned self.dataset_path inside the
        # success branch, so the error branch crashed with AttributeError
        # while trying to print the warning.
        self.dataset_path = os.path.join('./data', dataset_name)
        if os.path.exists('./data'):
            print(' [*] dataset path is "%s"' % self.dataset_path)
        else:
            print(' [!] dataset path "%s" is not exist' % self.dataset_path)

        self.dataset_name = dataset_name
        self.data_name = data_name
        self.checkpoint_dir = checkpoint_dir

        self.sampling_rate = sampling_rate
        self.mask_type = mask_type

        self.build_model()

    def build_model(self):
        '''Builds the graph: generator, discriminator, losses, summaries, saver.'''
        self.samples = tf.placeholder(tf.float32, [self.batch_size] + self.sample_shape + [1],
                                      name='real_data')                  # original full data
        self.mask = tf.placeholder(tf.float32, self.sample_shape + [1], name='mask')
        self.subsamples = self.__batch_multiply(self.samples, self.mask)  # subsampled data

        self.G = self.generator(self.subsamples)
        # BUG FIX: discriminator returns a (sigmoid, logits) tuple; the original
        # stored the whole tuple and fed it to summaries and to
        # sigmoid_cross_entropy_with_logits, which fails at graph construction.
        # Keep the probabilities for summaries and the raw logits for the losses.
        self.D, self.D_logits = self.discriminator(self.samples)          # real
        self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)  # fake

        # BUG FIX: the completion branch now reuses the trained generator
        # weights on the subsampled input. The original built a second,
        # never-trained network in a fresh "compeletion" variable scope and
        # fed it the full data, so completion output was meaningless.
        self.C = self.generator(self.subsamples, reuse=True)

        self.d_sum = tf.summary.histogram("d", self.D)
        self.d__sum = tf.summary.histogram("d_", self.D_)
        self.g_sum = tf.summary.histogram("g", self.G)

        self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.D_logits, labels=tf.ones_like(self.D_logits)))

        self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.D_logits_, labels=tf.zeros_like(self.D_logits_)))

        # BUG FIX: these losses are scalars — log them with scalar summaries,
        # not histograms.
        self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
        self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)

        # Adversarial part of the generator loss: fool D into labelling fakes as real.
        self.g_loss_cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.D_logits_, labels=tf.ones_like(self.D_logits_)))

        # Contextual (L1 reconstruction) part of the generator loss.
        self.g_loss_contextual = tf.reduce_sum(tf.contrib.layers.flatten(
            tf.abs(self.samples - self.G)))

        self.g_loss = self.g_loss_cross_entropy + self.lam * self.g_loss_contextual

        self.d_loss = self.d_loss_fake + self.d_loss_real

        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)

        t_vars = tf.trainable_variables()

        # Layer names inside the scopes carry 'g_' / 'd_' prefixes, so this
        # substring filter separates generator and discriminator variables.
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]

        self.saver = tf.train.Saver(max_to_keep=1)

    def generator(self, subsamples, name="generator", reuse=False):
        '''
        Encoder / dilated-bottleneck / decoder completion network.

        Args:
            subsamples: [batch, height, width, depth, 1] subsampled input tensor.
            name: Variable scope name.
            reuse: Reuse variables of a previously built generator with the
                   same scope name (new parameter, default preserves the old
                   behaviour for existing callers).
        Returns:
            Completed volume with the same shape as the input.
        '''
        with tf.variable_scope(name, reuse=reuse):
            # encoding
            h0 = lrelu(conv3d(subsamples, 32, s_d=1, s_h=1, s_w=1, name='g_h0_conv3d'))

            h1 = lrelu(conv3d(h0, 64, f_d=3, f_w=3, f_h=3, name='g_h1_conv3d'))  # 1/2
            h2 = lrelu(conv3d(h1, 64, f_d=3, f_w=3, f_h=3, s_d=1, s_h=1, s_w=1, name='g_h2_conv3d'))

            h3 = lrelu(conv3d(h2, 128, f_d=3, f_h=3, f_w=3, name='g_h3_conv3d'))  # 1/4

            # Dilated convolutions grow the receptive field without further
            # downsampling (key idea of the GLCIC paper).
            h4 = lrelu(dilated_conv3d(h3, name='g_h4_dilated_conv3d'))  # 1/4
            h5 = lrelu(dilated_conv3d(h4, dilation=4, name='g_h5_dilated_conv3d'))

            # decoding
            h6 = lrelu(conv3d(h5, 128, f_d=3, f_w=3, f_h=3, s_d=1, s_h=1, s_w=1, name='g_h6_conv3d'))  # 1/4

            # NOTE(review): names here read [depth, height, width] while the
            # class docstring says sample_shape is [height, width, depth] —
            # only the element order of sample_shape matters; confirm intent.
            depth, height, width = self.sample_shape[0], self.sample_shape[1], self.sample_shape[2]
            h7 = lrelu(deconv3d(h6, [self.batch_size, depth//2, height//2, width//2, 64],
                                f_d=4, f_h=4, f_w=4, name='g_h7_deconv3d'))  # 1/2
            h8 = lrelu(conv3d(h7, 64, f_d=3, f_w=3, f_h=3, s_d=1, s_h=1, s_w=1, name='g_h8_conv3d'))

            h9 = lrelu(deconv3d(h8, [self.batch_size] + self.sample_shape + [32], name='g_h9_deconv3d'))  # 1
            h10 = lrelu(conv3d(h9, 1, f_d=3, f_w=3, f_h=3, s_d=1, s_h=1, s_w=1, name='g_h10_conv3d'))

            return h10

    def discriminator(self, inputs, reuse=False):
        '''
        3D convolutional discriminator.

        Args:
            inputs: [batch, height, width, depth, 1] tensor (real or generated).
            reuse: Reuse the variables of a previously built discriminator.
        Returns:
            Tuple (probability, logits): sigmoid output and raw linear logits.
            Losses must be computed from the logits.
        '''
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            # encoding
            h0 = lrelu(conv3d(inputs, 32, name='d_h0_conv3d'))   # 1/2
            h1 = lrelu(conv3d(h0, 64, name='d_h1_conv3d'))       # 1/4
            h2 = lrelu(conv3d(h1, 128, name='d_h2_conv3d'))      # 1/8
            h3 = linear(tf.reshape(h2, [self.batch_size, -1]), 1, scope='d_h3_linear')

            return tf.nn.sigmoid(h3), h3

    def train(self, config):
        '''
        Alternating GAN training loop.

        Args:
            config: Object providing learning_rate, beta1, epoch and
                    checkpoint_dir attributes.
        '''
        d_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1)\
                  .minimize(self.d_loss, var_list=self.d_vars)
        g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1)\
                  .minimize(self.g_loss, var_list=self.g_vars)

        tf.global_variables_initializer().run()

        self.g_sum_1 = tf.summary.merge([self.d__sum, self.g_sum, self.d_loss_fake_sum, self.g_loss_sum])
        self.d_sum_1 = tf.summary.merge([self.d_sum, self.d_loss_real_sum, self.d_loss_sum])

        if not os.path.exists('./logs'):
            os.makedirs('./logs')

        self.writer = tf.summary.FileWriter('./logs', self.sess.graph)

        # load data
        seismic = SeismicData(self.dataset_path, data_name=self.data_name, batch_size=self.batch_size,
                              sample_shape=self.sample_shape)

        self.__load(config.checkpoint_dir)  # resume from an existing model if any

        print(' [*] Begining training!!!!')
        start_time = time.time()
        counter = 1
        # BUG FIX: range instead of xrange — the file otherwise targets
        # Python 3 compatible syntax (print() calls, __future__ division).
        for epoch in range(config.epoch):
            seismic.reset_batch_status()
            for idx in range(seismic.batch_cnt):
                batch_samples = seismic.next_batch()
                mask = self.__generate_mask()
                feed = {self.samples: batch_samples, self.mask: mask}

                print(' [*] Update discriminator....')
                _, summary_str = self.sess.run([d_optim, self.d_sum_1], feed_dict=feed)
                self.writer.add_summary(summary_str, counter)

                print(' [*] Update G network....')
                _, summary_str = self.sess.run([g_optim, self.g_sum_1], feed_dict=feed)
                self.writer.add_summary(summary_str, counter)

                # Run g_optim twice to make sure that d_loss does not go to
                # zero (different from paper).
                _, summary_str = self.sess.run([g_optim, self.g_sum_1], feed_dict=feed)
                self.writer.add_summary(summary_str, counter)

                errD_fake = self.d_loss_fake.eval(feed_dict=feed)
                # d_loss_real depends only on the real samples, not the mask.
                errD_real = self.d_loss_real.eval({self.samples: batch_samples})
                errG = self.g_loss.eval(feed_dict=feed)

                counter += 1
                print("Epoch: [{:2d}] [{:4d}/{:4d}] time: {:4.4f}, d_loss: {:.8f}, g_loss: {:.8f}".format(
                      epoch, idx, seismic.batch_cnt, time.time() - start_time, errD_fake + errD_real, errG))

                if np.mod(counter, 100) == 1:
                    pass  # TODO: periodic sampling / validation hook
                if np.mod(counter, 500) == 2:
                    self.__save(config.checkpoint_dir, counter)

    def compeletion(self, config):
        '''
        Runs the trained completion network over the whole dataset and saves
        the reconstructed volume as a .mat file under config.out_dir.

        NOTE: method name spelling is kept for backward compatibility.
        '''
        def make_dir(name):
            # Create an output subdirectory under config.out_dir if missing.
            p = os.path.join(config.out_dir, name)
            if not os.path.exists(p):
                os.makedirs(p)

        make_dir('completed')

        tf.global_variables_initializer().run()

        # BUG FIX: explicit error instead of assert (asserts vanish under -O).
        if not self.__load(self.checkpoint_dir):
            raise RuntimeError(' [!] Load checkpoint failed, cannot run completion')

        seismic = SeismicData(self.dataset_path, data_name=self.data_name, batch_size=self.batch_size,
                              sample_shape=self.sample_shape)

        print(' [*] Begining completion......')
        # self.C consumes the subsampled input, so a mask must be fed as well.
        mask = self.__generate_mask()
        tmp_data = []
        for _ in range(seismic.batch_cnt):
            tmp_data.append(self.sess.run(self.C,
                                          feed_dict={self.samples: seismic.next_batch(),
                                                     self.mask: mask}))

        # BUG FIX: write under config.out_dir (same root make_dir used) instead
        # of the hard-coded './out' path.
        sio.savemat(os.path.join(config.out_dir, 'completed', config.dataset_name + '.mat'),
                    seismic.reconstruct(np.stack(tmp_data)))

    def __batch_multiply(self, batch_samples, mask):
        '''
        Applies the mask to every sample of the batch (element-wise product).

        The mask has shape sample_shape + [1] and broadcasts over the leading
        batch dimension, so no per-sample Python loop (as in the original) is
        needed; the result is identical.
        '''
        return tf.multiply(batch_samples, mask)

    def __generate_mask(self):
        '''
        Generates a float32 subsampling mask of shape sample_shape + [1].

        For 'random_trace', every (height, width) trace is kept (set to 1)
        with probability sampling_rate and the decision is replicated along
        the depth axis.

        Raises:
            NotImplementedError: for the not-yet-implemented mask types.
            ValueError: for an unknown mask type (the original silently
                        returned None, which crashed later at feed time).
        '''
        if self.mask_type == 'random_trace':
            mask_mat = np.zeros(self.sample_shape[0:2], dtype=np.float32)
            keep = np.random.rand(self.sample_shape[0], self.sample_shape[1]) < self.sampling_rate
            mask_mat[keep] = 1
            mask_ten = np.repeat(mask_mat[:, :, np.newaxis], self.sample_shape[2], 2)
            return np.reshape(mask_ten, list(mask_ten.shape) + [1])
        elif self.mask_type in ('block', 'random_element'):
            raise NotImplementedError('mask_type "%s" is not implemented yet' % self.mask_type)
        else:
            raise ValueError('unknown mask_type "%s"' % self.mask_type)

    def __save(self, checkpoint_dir, step):
        '''Saves the current model state under checkpoint_dir at global step.'''
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)

        model_name = 'TGLCGAN'
        self.saver.save(self.sess,
                        os.path.join(checkpoint_dir, model_name),
                        global_step=step)

    def __load(self, checkpoint_dir):
        '''Restores the latest checkpoint. Returns True on success, else False.'''
        print(" [*] Reading checkpoints...")

        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            print(' [*] Load success........')
            return True
        else:
            print(' [!] Load failed........')
            return False
