#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 23 13:30:31 2016

@author: kevin
"""
from getStrokeLabel import genStrokes
import tensorflow as tf
import numpy as np
from utils import weight_variable, bias_variable, loglikelihood
from config import Config
from myGlimpse import GlimpseNet, LocNet
from myCNN import CNN2
import os, datetime,h5py
#import matplotlib.pyplot as plt

start_time=datetime.datetime.now()  # wall-clock timer for the whole run (reported at the end)


# Aliases into the pre-1.0 TensorFlow API; tf.nn.rnn_cell / tf.nn.seq2seq
# only exist in old TF releases.
rnn_cell = tf.nn.rnn_cell
seq2seq = tf.nn.seq2seq

config = Config()
n_steps = config.step  # number of training epochs (outer loop count)

# Per-glimpse traces filled in while the graph is built (one entry per glimpse step).
loc_mean_arr = []     # mean of the location policy at each glimpse
sampled_loc_arr = []  # location actually sampled at each glimpse
alpha_est_arr = []    # CNN alpha-matte estimate after each drawn stroke

draw = genStrokes(config, )  # renders strokes onto stroke maps (called via tf.py_func)
cnn2 = CNN2(config,)         # CNN predicting an alpha matte from (image, stroke map)

# placeholders
image_ph = tf.placeholder(tf.float32, [None, config.img_sz, config.img_sz, config.img_channels])
strokeMap_ph = tf.placeholder(tf.float32, [None, config.img_sz, config.img_sz, config.img_channels])
gtAlpha_ph = tf.placeholder(tf.float32, [None, config.alphaMatte_sz, config.alphaMatte_sz])
init_feature=tf.placeholder(tf.float32,[None,config.rnn_input_sz])  # feature seeding the first RNN step
ideal_seq=tf.placeholder(tf.float32,[None,config.num_glimpses+1,2])  # ground-truth stroke locations
#Bgloc_ph=tf.placeholder(tf.float32,[None,2])
loc_seq=[]  #[step,N,2]
def get_next_input(rnn_output, i):
    """Loop function for seq2seq.rnn_decoder: take the RNN output of step
    ``i`` and produce the glimpse vector fed in at the next step.

    Side effects on module-level graph state:
      * samples a stroke location from loc_net and records it in
        loc_seq / sampled_loc_arr / loc_mean_arr;
      * draws that stroke into the global ``strokeMaps`` tensor;
      * runs cnn2 on (images, strokeMaps) and appends the new alpha
        estimate to ``alpha_est_arr``.
    """
    global strokeMaps
    global images
    global gtAlphas
    loc, loc_mean = loc_net(rnn_output)  # sampled location + policy mean
    loc_seq.append(loc)
    # Draw the new stroke into the running stroke maps (numpy side, via py_func).
    strokeMaps = tf.py_func(draw.drawstroke, [strokeMaps, loc, gtAlphas], tf.float32) 
    alpha_est =  tf.py_func(cnn2.op,[images,strokeMaps],tf.double)  # cnn2.op returns float64
    alpha_est = tf.cast(alpha_est,tf.float32)
    alpha_est_arr.append(alpha_est)
    gl_next = glimpse_net(loc,alpha_est)  # glimpse feature for the next RNN step
    loc_mean_arr.append(loc_mean)
    sampled_loc_arr.append(loc)
    return gl_next

# build the auxiliary glimpse-net && loc-net
with tf.variable_scope('glimpse_net'):
    glimpse_net = GlimpseNet(config, )
with tf.variable_scope('loc_net'):
    loc_net = LocNet(config, )


# number of examples: N == M*batch
N = tf.shape(image_ph)[0]

#first_loc,_ = loc_net(init_feature)




# Module-level tensors mutated by get_next_input at every glimpse step.
strokeMaps = strokeMap_ph
images = image_ph
images_gray = tf.expand_dims(images[:,:,:,0],-1)  # first channel kept as grayscale
gtAlphas = tf.expand_dims(gtAlpha_ph, -1)         # add channel dim for drawstroke
#Bg_loc=Bgloc_ph


# RNN --- LSTM
lstm_cell = rnn_cell.BasicLSTMCell(config.rnn_input_sz, forget_bias=1.0, state_is_tuple=True)


init_state=lstm_cell.zero_state(N,tf.float32)

# Bootstrap step: run the decoder once on init_feature to get the first location.
rnn_init_input = [init_feature]

with tf.variable_scope('first_rnn'):
      rnn_init_output, rnn_init_states = seq2seq.rnn_decoder(rnn_init_input, init_state, lstm_cell)
rnn_init_output=tf.squeeze(rnn_init_output)  # drop the length-1 sequence dim
first_loc, _ = loc_net(rnn_init_output)
loc_seq.append(first_loc)

# Draw the first stroke and get the initial alpha estimate before the main decoder.
strokeMaps = tf.py_func(draw.drawstroke, [strokeMaps, first_loc, gtAlphas], tf.float32)
#strokeMaps = tf.py_func(draw.drawstroke, [strokeMaps, Bg_loc, gtAlphas], tf.float32)

alpha_est = tf.py_func(cnn2.op,[images,strokeMaps],tf.double)


alpha_est = tf.cast(alpha_est,tf.float32)
alpha_est_arr.append(alpha_est)

first_gl = glimpse_net(first_loc, alpha_est)


# Main decoder: only the first input matters; the [0] placeholders are
# replaced at each step by get_next_input (the loop_function).
rnn_inputs = [first_gl]   
rnn_inputs.extend([0]*(config.num_glimpses))
rnn_outputs, rnn_states = seq2seq.rnn_decoder(rnn_inputs, rnn_init_states, lstm_cell, loop_function=get_next_input)


# Baseline Network for b_t
with tf.variable_scope('baseline'):
    w_baseline = weight_variable((config.rnn_output_sz, 1))
    b_baseline = bias_variable((1,))
baselines = []
for t, output in enumerate(rnn_outputs[1:]):
    baseline_t = tf.nn.xw_plus_b(output, w_baseline, b_baseline)
    baseline_t = tf.squeeze(baseline_t) # remove dimensions of size 1
    baselines.append(baseline_t)
baselines = tf.pack(baselines)      # [timesteps, batch*M]
baselines = tf.transpose(baselines) # [batch*M, timesteps]

###########################################
# define the loss for training
#alpha_last = tf.squeeze(alpha_est_arr[0]) 
#alpha_this = tf.squeeze(alpha_est_arr[-1])

#mse_loss_this = tf.reduce_mean(tf.square(alpha_this - gtAlpha_ph),reduction_indices=[1,2])
#mse_loss_last = tf.reduce_mean(tf.square(alpha_last - gtAlpha_ph),reduction_indices=[1,2])

# 0/1 reward
#reward = tf.cast(tf.less_equal(mse_loss_this, mse_loss_last), tf.float32)      
#rewards = tf.expand_dims(reward, 1)
#rewards = tf.tile(rewards, (1, config.num_glimpses))                     # [batch_sz*M, timesteps]
rewards = []
for i in range(config.num_glimpses):
     alpha_last = tf.squeeze(alpha_est_arr[i])
     alpha_this = tf.squeeze(alpha_est_arr[i+1])
     mse_loss_this = tf.reduce_mean(tf.square(alpha_this - gtAlpha_ph),reduction_indices=[1,2])  # batch_loss
     mse_loss_last = tf.reduce_mean(tf.square(alpha_last - gtAlpha_ph),reduction_indices=[1,2])
     reward = tf.cast(tf.less_equal(mse_loss_this, mse_loss_last), tf.float32)      # reward of batch
     rewards.append(reward)
     
rewards = tf.pack(rewards)  # rewards of sequences
rewards = tf.transpose(rewards)
loc_compute = tf.pack(sampled_loc_arr) # [timesteps, batch_sz, loc_dim]

loc_avg = tf.reduce_mean(loc_compute) # average over all dims
loc_var = tf.reduce_mean(tf.square(tf.reduce_mean(loc_compute, reduction_indices=[1,2]) - loc_avg))



logll = loglikelihood(tf.cast(loc_mean_arr,tf.float64), tf.cast(sampled_loc_arr,tf.float64), tf.cast(config.loc_std,tf.float64))     # [batch_sz*M, timesteps]
logll = tf.cast(logll,tf.float32)
advs = rewards - tf.stop_gradient(baselines)
logllratio = tf.reduce_mean(logll * advs)
reward = tf.reduce_mean(reward)    # average reward over batch_sz*M
# baseline_mse
baseline_mse = tf.reduce_mean(tf.square((rewards - baselines)))

# collect all trainable variables
var_list = tf.trainable_variables()

#loc loss
loc_seq=tf.transpose(loc_seq,[1,0,2])
loc_loss = tf.reduce_mean(tf.square(loc_seq-ideal_seq))

# hybrid loss
loss = -logllratio + baseline_mse - loc_var+100*loc_loss
grads = tf.gradients(loss, var_list)
grads, _ = tf.clip_by_global_norm(grads, config.max_grad_norm)                 # grad clipping

# learning rate
global_step = tf.get_variable('global_step',[],initializer=tf.constant_initializer(0), trainable=False)
starter_learning_rate = config.lr_start

learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, config.lrDecayFreq, 0.97, staircase=True)
learning_rate = tf.maximum(learning_rate, config.lr_min)
opt = tf.train.AdamOptimizer(learning_rate)
train_op = opt.apply_gradients(zip(grads, var_list), global_step=global_step)
saver=tf.train.Saver()

def read_data(path):
     """Load the 'images' and 'labels' datasets from an HDF5 file.

     The original opened the same file twice and never closed either
     handle; open it once with a context manager so the handle is
     released deterministically.

     Args:
         path: filesystem path to the HDF5 file.
     Returns:
         (images, labels) as numpy arrays.
     """
     with h5py.File(path, 'r') as f:
          images = np.array(f.get('images'))
          labels = np.array(f.get('labels'))
     return images, labels
     
def read_prob(path):
     """Load the 'probs' dataset from an HDF5 file.

     Uses a context manager so the file handle is closed (the original
     leaked it).

     Args:
         path: filesystem path to the HDF5 file.
     Returns:
         probs as a numpy array.
     """
     with h5py.File(path, 'r') as f:
          probs = np.array(f.get('probs'))
     return probs

def read_loc(path):
    """Load the 'locs' dataset from an HDF5 file.

    Uses a context manager so the file handle is closed (the original
    leaked it).

    Args:
        path: filesystem path to the HDF5 file.
    Returns:
        locs as a numpy array.
    """
    with h5py.File(path, 'r') as f:
        locs = np.array(f.get('locs'))
    return locs

def pre_processStroke(images, alphas, band_height=10):
    """Seed stroke maps from the ground-truth alpha in the top and bottom image bands.

    For the first and last ``band_height`` rows of every image, pixels whose
    alpha is exactly 0 are painted black (background stroke) and pixels whose
    alpha is exactly 255 are painted white (foreground stroke); every other
    pixel keeps the original image colour.

    Fixes in this revision:
      * The original unpacked the shape as ``[batch, width, height, ch]``
        while NumPy image batches are ``(batch, H, W, C)``; on non-square
        images ``x in range(width)`` then indexed columns out of bounds.
      * Removed the unused local ``img``.
      * The hard-coded band of 10 rows is now the ``band_height`` keyword
        (default 10, so existing callers are unchanged).

    Args:
        images: array of shape (batch, H, W, 3).
        alphas: array of shape (batch, H, W) with 0/255 trimap-style values.
        band_height: rows at each of the top and bottom edges to convert.
    Returns:
        A new array (the input is not modified) with stroke pixels painted.
    """
    Background = (0, 0, 0)
    Foreground = (255, 255, 255)
    batch_size, height, width, _ = images.shape
    strokes = images.copy()
    for i in range(batch_size):
        alpha = alphas[i]
        for y in range(band_height):
            bottom = height - y - 1  # mirror row in the bottom band
            for x in range(width):
                if alpha[y, x] == 0:
                    strokes[i, y, x] = Background
                elif alpha[y, x] == 255:
                    strokes[i, y, x] = Foreground
                if alpha[bottom, x] == 0:
                    strokes[i, bottom, x] = Background
                elif alpha[bottom, x] == 255:
                    strokes[i, bottom, x] = Foreground
    return strokes


with tf.Session() as sess:
     saver.restore(sess, "./net/save_net.ckpt")
     
     data_dir = os.path.join(os.getcwd(), 'train.h5')
     prob_dir = os.path.join(os.getcwd(), 'probs.h5')
     
     train_images, train_labels = read_data(data_dir)
     train_strokeMaps=train_images.copy()
     train_probs = read_prob(prob_dir)
     train_locs = read_loc(data_dir)
     #sess.run(tf.initialize_all_variables())
     
     # draw figure for loss
    # plt.axis([0,20,-5,5])
     fig_loss = []

     counter = 0
     for ep in xrange(n_steps):
          batch_idxs = len(train_images) // config.batch_sz
          print ('batch_idxs==', batch_idxs)
          for idx in xrange(0, batch_idxs):
               print ('idx==', idx)
               counter = counter + 1
               
               batch_images = train_images[idx*config.batch_sz : (idx+1)*config.batch_sz]
               batch_labels = train_labels[idx*config.batch_sz : (idx+1)*config.batch_sz]
               batch_strokeMaps= train_strokeMaps[idx*config.batch_sz : (idx+1)*config.batch_sz]
               batch_probs  = train_probs[idx*config.batch_sz : (idx+1)*config.batch_sz]
               batch_locs=train_locs[idx*config.batch_sz : (idx+1)*config.batch_sz]

               
               batch_images = np.reshape(batch_images, (-1, config.img_sz * config.img_sz * config.img_channels))
               batch_labels = np.reshape(batch_labels, (-1, config.alphaMatte_sz * config.alphaMatte_sz * config.alphaMatte_channels))
               batch_strokeMaps = np.reshape(batch_strokeMaps, (-1, config.img_sz * config.img_sz * config.img_channels))
               
               
               batch_images = np.tile(batch_images, [config.M, 1]).astype('float32')
               batch_labels = np.tile(batch_labels, [config.M, 1]).astype('float32')
               batch_strokeMaps = np.tile(batch_strokeMaps, [config.M, 1]).astype('float32')

               batch_probs = np.tile(batch_probs, [config.M, 1]).astype('float32')
               batch_locs=np.tile(batch_locs, [config.M,1,1]).astype('float32')

               batch_images = np.reshape(batch_images, (-1, config.img_sz, config.img_sz, config.img_channels))
               batch_labels = np.reshape(batch_labels, (-1, config.alphaMatte_sz, config.alphaMatte_sz))
               batch_strokeMaps = np.reshape(batch_strokeMaps, (-1, config.img_sz, config.img_sz, config.img_channels))
               
               loc_net.sampling = True
               alpha_est_val,adv_val, baseline_mse_val, mse_loss_this_val, \
               mse_loss_last_val, logllratio_val, logll_val, rewards_val, reward_val, \
               loss_val, lr_val, _ = sess.run([alpha_est,advs, baseline_mse, mse_loss_this,
                                        mse_loss_last, logllratio, logll, rewards, reward,
                                        loss, learning_rate, train_op], 
                                        feed_dict={image_ph: batch_images,
                                                   gtAlpha_ph: batch_labels,
                                                   strokeMap_ph: batch_strokeMaps,
                                                   init_feature: batch_probs,
                                                   ideal_seq:batch_locs
                                            })
               print('adv: ',adv_val)
               print('-logllratio: ',logllratio_val)
               print('baseline_mse: ',baseline_mse_val)          
               print('reward: ',reward_val)                  
               print('loss: ',loss_val)                          
	       print type(alpha_est_val)
	       print alpha_est_val.shape
               fig_loss.append(loss_val)
               #plt.plot(fig_loss)
               if counter%10==0:
                    save_path=saver.save(sess,"./net/save_net.ckpt",global_step=counter)
                    #plt.show()
               
     #plt.show()
               
     #save_path=saver.save(sess,"./net/save_net.ckpt",global_step=x)
    #save_path=saver.save(sess,"./net/save_net.ckpt")

end_time=datetime.datetime.now()
print 'total time is: ',(end_time-start_time).seconds
    













