import tensorflow as tf
import numpy as np
from config import Config
import os, datetime
import h5py,cv2
from vgg16 import vgg16

# Module-level configuration object (batch size, glimpse count, loc dims, ...)
# shared by every function and class below.
config = Config()

def read_data(path):
    """Load images/labels/locations from an HDF5 file and extract VGG16
    conv5_3 features for every image.

    Args:
        path: path to an HDF5 file containing datasets 'images', 'labels'
              and 'locs'.

    Returns:
        Tuple ``(input_features, images, labels, locs)`` where
        ``input_features`` is the conv5_3 activation produced by a
        pretrained VGG16 for each image resized to 224x224.
    """
    # Open the HDF5 file once (the original opened it three times and never
    # closed any handle); the context manager guarantees it is closed.
    with h5py.File(path, 'r') as f:
        images = np.array(f.get('images'))
        labels = np.array(f.get('labels'))
        locs = np.array(f.get('locs'))

    sess = tf.Session()
    try:
        imgs_Ph = tf.placeholder(tf.float32, [None, 224, 224, 3])
        vgg = vgg16(imgs_Ph, 'vgg16_weights.npz', sess)
        # VGG16 expects 224x224 inputs; resize every image.
        imgs = [cv2.resize(images[i], (224, 224)) for i in range(images.shape[0])]
        input_features = sess.run(vgg.conv5_3, feed_dict={imgs_Ph: imgs})
    finally:
        # Close the session even if feature extraction raises.
        sess.close()
    return input_features, images, labels, locs


    


class LSTMModel(object):
    """Two-layer LSTM that predicts a sequence of glimpse locations from
    pre-extracted VGG16 conv5_3 feature maps."""

    def __init__(self, mode, sess, data_path='./train.h5'):
        """Create placeholders and load the training data.

        Args:
            mode: 'train' enables dropout on the LSTM cells; anything else
                  (e.g. 'test') disables it.
            sess: a tf.Session (kept for interface compatibility; data
                  loading opens its own session inside read_data).
            data_path: HDF5 file with features/labels/locations.  New
                       optional parameter defaulting to the previously
                       hard-coded path, so existing callers are unaffected.
        """
        self.mode = mode
        self.LSTM_hidden_units = 128
        self.step = config.num_glimpses
        features, images, labels, locs = read_data(data_path)
        self.init_features = features
        self.ideal_locs = locs
        _, self.feat_width, self.feat_height, self.feat_channel = self.init_features.shape
        self.input_features = tf.placeholder(
            tf.float32,
            [None, self.feat_width, self.feat_height, self.feat_channel],
            name='input_features_placeholder')
        # BUG FIX: target locations are continuous values in [-1, 1] and are
        # compared against a float32 prediction in an MSE loss; declaring this
        # placeholder tf.int8 (as before) truncates the targets and breaks the
        # loss's dtype contract.  Use float32 to match pred_loc.
        self.ideal_seq = tf.placeholder(tf.float32, [None, self.step, config.loc_dim])
        self._extra_train_ops = []

    def build_graph(self):
        """Assemble the full graph: model, training op, merged summaries."""
        self._build_model()
        self._build_train_op()
        # NOTE: attribute name keeps the historical spelling because external
        # callers (e.g. train()) reference `merged_summay`.
        self.merged_summay = tf.summary.merge_all()

    def _build_model(self):
        """Stacked 2-layer LSTM fed the (repeated) flattened feature map,
        followed by a linear decoder emitting one location per time step."""
        with tf.variable_scope('lstm'):
            # Freeze the CNN features; only the LSTM/decoder are trained.
            x = tf.stop_gradient(self.input_features)
            # [batch,w,h,c] -> [step,batch,w,h,c] -> [batch,step,w,h,c]
            x = tf.tile([x], [self.step, 1, 1, 1, 1])
            x = tf.transpose(x, [1, 0, 2, 3, 4])
            # Flatten each feature map into a single vector per time step.
            x = tf.reshape(
                x, [-1, self.step, self.feat_width * self.feat_height * self.feat_channel])

            cell = tf.nn.rnn_cell.LSTMCell(self.LSTM_hidden_units, state_is_tuple=True)
            if self.mode == 'train':
                cell = tf.nn.rnn_cell.DropoutWrapper(cell=cell, output_keep_prob=0.8)
            cell1 = tf.nn.rnn_cell.LSTMCell(self.LSTM_hidden_units, state_is_tuple=True)
            if self.mode == 'train':
                cell1 = tf.nn.rnn_cell.DropoutWrapper(cell=cell1, output_keep_prob=0.8)

            stack = tf.nn.rnn_cell.MultiRNNCell([cell, cell1], state_is_tuple=True)
            initial_state = stack.zero_state(tf.shape(x)[0], dtype=tf.float32)

            # The second output is the final state, which we do not need.
            outputs, _ = tf.nn.dynamic_rnn(
                cell=stack,
                inputs=x,
                initial_state=initial_state,
                dtype=tf.float32,
                time_major=False)  # [batch, step, LSTM_hidden_units]

            # [batch, step, units] -> [batch*step, units] for the linear layer.
            outputs = tf.reshape(outputs, [-1, self.LSTM_hidden_units])

        with tf.variable_scope('loc_decode'):
            W = tf.get_variable(name='W_out',
                                shape=[self.LSTM_hidden_units, config.loc_dim],
                                dtype=tf.float32,
                                initializer=tf.random_uniform_initializer())
            b = tf.get_variable(name='b_out',
                                shape=[config.loc_dim],
                                dtype=tf.float32,
                                initializer=tf.constant_initializer())

            self.pred_loc = tf.matmul(outputs, W) + b
            # Locations are normalized coordinates; keep them inside [-1, 1].
            # (The previous cast to float32 was a no-op and has been removed.)
            self.pred_loc = tf.clip_by_value(self.pred_loc, -1.0, 1.0)
            # Tensor name 'loc_decode/final_predict:0' is looked up by infer().
            self.pred_loc = tf.reshape(self.pred_loc,
                                       [-1, self.step, config.loc_dim],
                                       name='final_predict')

    def _build_train_op(self):
        """MSE loss on predicted vs. ideal locations, Adam optimizer with
        exponential learning-rate decay, plus scalar summaries."""
        self.global_step = tf.train.get_or_create_global_step()

        # BUG FIX: tf.losses.mean_squared_error expects (labels, predictions);
        # the targets must come first.  MSE itself is symmetric, but the
        # documented order matters if per-example weights are ever added.
        self.loss = tf.losses.mean_squared_error(self.ideal_seq, self.pred_loc)
        self.cost = tf.reduce_mean(self.loss)
        tf.summary.scalar('cost', self.cost)

        self.lrn_rate = tf.train.exponential_decay(learning_rate=1e-3,
                                                   global_step=self.global_step,
                                                   decay_steps=10000,
                                                   decay_rate=0.98,
                                                   staircase=True)
        tf.summary.scalar('learning_rate', self.lrn_rate)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lrn_rate).minimize(
            self.loss, global_step=self.global_step)
        train_ops = [self.optimizer] + self._extra_train_ops
        self.train_op = tf.group(*train_ops)


def train(sess, ckpt_dir='./net/', fromScratch=False, save_steps=5, mode='train'):
    """Train the LSTMModel, optionally restoring from the latest checkpoint.

    Args:
        sess: tf.Session in which to build and run the graph.
        ckpt_dir: directory for checkpoints; summaries go to `<ckpt_dir>/log`.
        fromScratch: if True, ignore any existing checkpoint.
        save_steps: checkpoint period in global steps.
        mode: passed to LSTMModel; 'train' enables dropout.
    """
    model = LSTMModel(mode, sess)
    model.build_graph()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(tf.global_variables())
    # Ensure the checkpoint directory exists up front.  The original used
    # os.mkdir inside the save branch, which fails for nested paths and only
    # after the summary writer had already been pointed at the directory.
    os.makedirs(ckpt_dir, exist_ok=True)
    train_writer = tf.summary.FileWriter(os.path.join(ckpt_dir, 'log'), sess.graph)
    if not fromScratch:
        ckpt = tf.train.latest_checkpoint(ckpt_dir)
        if ckpt:
            saver.restore(sess, ckpt)
        else:
            print('Cannot find ckpt File, training from scratch...')
    else:
        print('Training from scratch...')

    try:
        for ep in range(config.step):
            batch_idxs = len(model.init_features) // config.batch_sz
            print('epoch==', ep)
            for idx in range(batch_idxs):
                batch_inputs = model.init_features[idx * config.batch_sz:(idx + 1) * config.batch_sz]
                batch_locs = model.ideal_locs[idx * config.batch_sz:(idx + 1) * config.batch_sz]
                feed = {model.input_features: batch_inputs, model.ideal_seq: batch_locs}
                summary_str, batch_cost, step, _ = sess.run(
                    [model.merged_summay, model.cost, model.global_step, model.train_op], feed)
                print('Training at ', step, 'step, the loss is: ', batch_cost)

                train_writer.add_summary(summary_str, step)

                # Save a checkpoint once every `save_steps` global steps.
                if step % save_steps == 1:
                    saver.save(sess, os.path.join(ckpt_dir, 'model.ckpt'), global_step=step)
    finally:
        # Flush/close the writer even if training is interrupted; the
        # original leaked the FileWriter.
        train_writer.close()

def infer(ckpt_dir='./net/', loadgraph=True):
    """Run the trained model on the first training sample and return the
    predicted location sequence.

    Args:
        ckpt_dir: directory holding the checkpoint (and its .meta graph).
        loadgraph: if True, restore the whole graph from the .meta file;
                   otherwise rebuild the model in code and restore weights.

    Returns:
        numpy array of predicted locations, shape [batch, step, loc_dim].
    """
    inputs, _, _, _ = read_data('./train.h5')
    single_input = inputs[0]
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if loadgraph:
        # BUG FIX: import the meta graph exactly once -- the original called
        # import_meta_graph twice, duplicating every node in the default graph.
        saver = tf.train.import_meta_graph(ckpt.model_checkpoint_path + '.meta')
        # Replicate the single sample across the batch dimension the saved
        # graph was trained with.
        batch_inputs = np.tile(single_input, [config.batch_sz, 1, 1, 1])
        with tf.Session() as sess:
            saver.restore(sess, ckpt.model_checkpoint_path)
            output_loc = sess.run(
                tf.get_default_graph().get_tensor_by_name('loc_decode/final_predict:0'),
                feed_dict={'input_features_placeholder:0': batch_inputs})
            print(output_loc.shape)
            return output_loc
    else:
        with tf.Session() as sess:
            model = LSTMModel('test', sess)
            model._build_model()
            # BUG FIX: the Saver must be created AFTER the model variables
            # exist; tf.train.Saver() constructed before building the model
            # (as the original did) finds no variables and raises.
            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, ckpt.model_checkpoint_path)
            output_loc = sess.run(model.pred_loc,
                                  feed_dict={model.input_features: [single_input]})
            print(output_loc.shape)
            return output_loc





if __name__ == "__main__":
    # Run training; the context manager closes the session even if train()
    # raises (the original leaked the session on error).
    with tf.Session() as sess:
        train(sess)

    # infer(loadgraph=True)