
import numpy as np 
import cv2
from config import Config
import tensorflow as tf
# from testStage
from getStrokeLabel import genStrokes
from utils import weight_variable, bias_variable, loglikelihood
from myGlimpse import GlimpseNet, LocNet
from myCNN import CNN2
from LastDraw import myDraw
import os
import h5py

config = Config()
# Aliases into the pre-1.0 TensorFlow namespaces (tf.nn.seq2seq and
# tf.pack were removed in TF 1.0) -- this file targets TF 0.x / Python 2.
rnn_cell = tf.nn.rnn_cell
seq2seq = tf.nn.seq2seq

n_steps = config.step  # RNN step count; not referenced in this chunk

# Module-level accumulators populated while aa.func_a builds the graph
# (its loop_function appends one entry per glimpse step).
loc_mean_arr = []  # mean of each emitted location
sampled_loc_arr = []  # stochastically sampled locations
alpha_est_arr = []  # per-step alpha estimates produced by cnn2.op

# Helper objects constructed at import time; their ops are spliced into
# the TF graph via tf.py_func.  NOTE(review): `draw` is not referenced
# in this chunk (myDraw is imported separately) -- confirm it is needed.
draw = genStrokes(config, )
cnn2 = CNN2(config,)
class aa():
	"""Builds the recurrent-attention / stroke-drawing TensorFlow graph
	and restores trained weights from a checkpoint.

	NOTE(review): written for TensorFlow 0.x (tf.pack, tf.nn.seq2seq)
	and Python 2 (print statement).  func_b is only a trampoline into
	func_a, which does all the work; the class looks like scaffolding
	around what was once a script.
	"""
	def func_b(self):
		# Thin wrapper; all graph construction happens in func_a.
		self.func_a()
	def func_a(self):
		"""Construct the glimpse/LSTM graph and restore ./net/save_net.ckpt.

		Side effects: appends to the module-level lists loc_mean_arr,
		sampled_loc_arr and alpha_est_arr, and rebinds the module-level
		globals `strokeMaps` and `images` while the graph is built.
		"""
	# placeholders: input images, running stroke maps, and the initial
	# feature vector fed to the RNN (batch dimension left dynamic)
		image_ph = tf.placeholder(tf.float32, [None, config.img_sz, config.img_sz, config.img_channels])
		strokeMap_ph = tf.placeholder(tf.float32, [None, config.img_sz, config.img_sz, config.img_channels])
		init_feature = tf.placeholder(tf.float32, [None, config.rnn_input_sz])

		def get_next_input(rnn_output, i):
		    """loop_function for rnn_decoder: turn the RNN output of one
		    step into the glimpse vector fed in at the next step.

		    Emits a location from loc_net, draws it into the shared
		    stroke maps via myDraw (tf.py_func), re-estimates alpha with
		    cnn2.op, and records the location/alpha into the module-level
		    accumulator lists.
		    """
		    global strokeMaps
		    global images
		    global gtAlphas
		    loc, loc_mean = loc_net(rnn_output)
		    # tf.py_func is declared to return float64 (tf.double);
		    # cast back to the graph's float32 after each call.
		    strokeMaps = tf.py_func(myDraw, [strokeMaps, loc], tf.double)
		    strokeMaps = tf.cast(strokeMaps, tf.float32)
		    alpha_est = tf.py_func(cnn2.op, [images, strokeMaps], tf.double)
		    alpha_est = tf.cast(alpha_est, tf.float32)
		    alpha_est_arr.append(alpha_est)
		    gl_next = glimpse_net(loc, alpha_est)
		    loc_mean_arr.append(loc_mean)
		    sampled_loc_arr.append(loc)
		    return gl_next

		# build the auxiliary glimpse-net && loc-net
		with tf.variable_scope('glimpse_net'):
		    glimpse_net = GlimpseNet(config, )
		with tf.variable_scope('loc_net'):
		    loc_net = LocNet(config, )

		# number of examples: N == M*batch
		N = tf.shape(image_ph)[0]

		# first_loc,_ = loc_net(init_feature)

		# Share the current stroke maps / images with get_next_input
		# through module globals (rebound as the graph is built).
		global strokeMaps
		global images
		strokeMaps = strokeMap_ph
		images = image_ph

		# RNN --- LSTM
		lstm_cell = rnn_cell.BasicLSTMCell(config.rnn_input_sz, forget_bias=1.0, state_is_tuple=True)

		init_state = lstm_cell.zero_state(N, tf.float32)

		rnn_init_input = [init_feature]

		# One "priming" decoder step converts the initial feature vector
		# into the first location.
		with tf.variable_scope('first_rnn'):
		    rnn_init_output, rnn_init_states = seq2seq.rnn_decoder(rnn_init_input, init_state, lstm_cell)
		rnn_init_output = tf.squeeze(rnn_init_output)
		first_loc, _ = loc_net([rnn_init_output])

		# Same draw -> alpha-estimate pipeline as get_next_input, applied
		# once for the first location.
		strokeMaps = tf.py_func(myDraw, [strokeMaps, first_loc], tf.double)
		strokeMaps = tf.cast(strokeMaps, tf.float32)
		alpha_est = tf.py_func(cnn2.op, [images, strokeMaps], tf.double)

		alpha_est = tf.cast(alpha_est, tf.float32)
		alpha_est_arr.append(alpha_est)

		first_gl = glimpse_net(first_loc, alpha_est)

		# Only the first input is real; the zeros are dummies that make
		# rnn_decoder call loop_function (get_next_input) for each of the
		# remaining num_glimpses steps.
		rnn_inputs = [first_gl]
		rnn_inputs.extend([0] * (config.num_glimpses))
		rnn_outputs, rnn_states = seq2seq.rnn_decoder(rnn_inputs, rnn_init_states, lstm_cell,
				                              loop_function=get_next_input)

		# Baseline Network for b_t (variance reduction for the policy
		# gradient; a linear readout of each RNN output)
		with tf.variable_scope('baseline'):
		    w_baseline = weight_variable((config.rnn_output_sz, 1))
		    b_baseline = bias_variable((1,))
		baselines = []
		for t, output in enumerate(rnn_outputs[1:]):
		    baseline_t = tf.nn.xw_plus_b(output, w_baseline, b_baseline)
		    baseline_t = tf.squeeze(baseline_t)  # remove dimensions of size 1
		    baselines.append(baseline_t)
		baselines = tf.pack(baselines)  # [timesteps, batch*M]
		baselines = tf.transpose(baselines)  # [batch*M, timesteps]

		# NOTE(review): this loop computes alpha_last/alpha_this but
		# never appends anything to `rewards`, so `rewards` stays empty
		# and both locals are discarded -- the reward computation looks
		# removed or unfinished; confirm against the training script.
		rewards = []
		for i in range(config.num_glimpses):
		    alpha_last = tf.squeeze(alpha_est_arr[i])
		    alpha_this = tf.squeeze(alpha_est_arr[i + 1])

		loc_compute = tf.pack(sampled_loc_arr)  # [timesteps, batch_sz, loc_dim]

		loc_avg = tf.reduce_mean(loc_compute)  # average over all dims
		loc_var = tf.reduce_mean(tf.square(tf.reduce_mean(loc_compute, reduction_indices=[1, 2]) - loc_avg))

		# log-likelihood of the sampled locations under the Gaussian
		# location policy (computed in float64, cast back to float32)
		logll = loglikelihood(tf.cast(loc_mean_arr, tf.float64), tf.cast(sampled_loc_arr, tf.float64),
				      tf.cast(config.loc_std, tf.float64))  # [batch_sz*M, timesteps]
		logll = tf.cast(logll, tf.float32)
		# average reward over batch_sz*M
		# baseline_mse

		# collect all trainable variables
		var_list = tf.trainable_variables()

		# hybrid loss

		# learning rate: exponential decay, clamped below at config.lr_min
		global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
		starter_learning_rate = config.lr_start

		learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, config.lrDecayFreq, 0.97,
				                           staircase=True)
		learning_rate = tf.maximum(learning_rate, config.lr_min)

		saver = tf.train.Saver()

		def read_data(path):
		    """Load the 'images' dataset from an HDF5 file as a numpy array."""
		    images = np.array(h5py.File(path, 'r').get('images'))
		    return images

		def read_prob(path):
		    """Load the 'probs' dataset from an HDF5 file as a numpy array."""
		    probs = np.array(h5py.File(path, 'r').get('probs'))
		    return probs

		def read_loc(path):
		    """Load the 'locs' dataset from an HDF5 file as a numpy array."""
		    locs = np.array(h5py.File(path, 'r').get('locs'))
		    return locs

		def pre_processStroke(images, alphas):
		    """Return a copy of `images` with pixels forced to pure black
		    where the alpha map is exactly 0 and pure white where it is
		    exactly 255.

		    NOTE(review): only the top 10 and bottom 10 rows of each
		    image are touched (range(10) over y), and `img` is bound but
		    never used -- confirm the border-only restriction is
		    intentional.
		    """
		    Background = (0, 0, 0)
		    Foreground = (255, 255, 255)
		    [batch_size, width, height, ch] = images.shape
		    strokes = images.copy()
		    for i in range(batch_size):
			img = images[i]
			alpha = alphas[i]
			for y in range(10):
			    for x in range(width):
				if alpha[y, x] == 0:
				    strokes[i, y, x] = Background
				elif alpha[y, x] == 255:
				    strokes[i, y, x] = Foreground
				if alpha[height - y - 1, x] == 0:
				    strokes[i, height - y - 1, x] = Background
				elif alpha[height - y - 1, x] == 255:
				    strokes[i, height - y - 1, x] = Foreground
		    return strokes

		# Restore previously-trained weights; nothing else is executed
		# inside this session.
		with tf.Session() as sess:

		    saver.restore(sess, "./net/save_net.ckpt")
		    print "succeed"  # Python 2 print statement
# Guard the entry point: the original ran graph construction and a
# checkpoint-restoring tf.Session at import time, which makes the module
# unusable as a library.  Behavior when run as a script is unchanged.
if __name__ == "__main__":
	a = aa()
	a.func_b()
