import tensorflow as tf
import numpy as np
import os,cv2
#from DenseNet import DenseNet
from stroke import label_for_batches,user_interaction
#from matsolver import matsolver
#from sharedsolver import sharedsolver
from data_helper import process_data, get_data,extract_feat

def matsolver(images, trimaps, name):
    """Estimate an alpha matte with a single 1x1 convolution.

    Args:
         images: [batch, h, w, 3] input images
         trimaps: [batch, h, w, c] current trimap tensor
         name: variable scope (reused via AUTO_REUSE across calls)
    Returns:
         [batch, h, w, 1] matte estimate
    """
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        stacked = tf.concat([images, trimaps], 3)
        matte = tf.layers.conv2d(
            stacked,
            filters=1,
            kernel_size=[1, 1],
            strides=[1, 1],
            padding='SAME',
            kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
            name=name + '_conv')
    return matte




def fc_layer(inp, out_filters, name):
    '''
    Fully-connected layer: flatten a 4-D tensor and project it.

    Args:
         inp: a 4d tensor [batch_size, h, w, channels]
         out_filters: an integer, output dimension
         name: name scope for the layer
    Returns:
         a 2d tensor [batch_size, out_filters]
    '''
    with tf.name_scope(name):
        _, h, w, c = inp.get_shape().as_list()
        x = tf.reshape(inp, [-1, h * w * c])
        # Bug fix: the weights must be wrapped in tf.Variable — a bare
        # tf.truncated_normal tensor is re-sampled on every session run and
        # is never trained.  Renamed to `weight` to stop shadowing the
        # unpacked width `w`.
        weight = tf.Variable(tf.truncated_normal(shape=[h * w * c, out_filters], stddev=0.01))
        bias = tf.Variable(tf.zeros([out_filters]))
        out = tf.matmul(x, weight) + bias
    return out
     

def baseline_network(inp, out_filters, name='baseline_network'):
    '''
    Linear projection used as the REINFORCE baseline.

    Args:
         inp: a 2d tensor [batch_size, inp_channels]
         out_filters: an integer, output dimension
         name: variable scope for the projection
    Returns:
         a 2d tensor [batch_size, out_filters]
    '''
    with tf.variable_scope(name):
        _, inp_channels = inp.get_shape().as_list()
        # Bug fix: wrap the weights in tf.Variable so they are trainable;
        # a bare tf.truncated_normal tensor is re-sampled on every run.
        weight = tf.Variable(tf.truncated_normal(shape=[inp_channels, out_filters], stddev=0.01))
        bias = tf.Variable(tf.zeros([out_filters]))
        out = tf.matmul(inp, weight) + bias
    return out
             



class ActiveModel(object):
    '''Recurrent-attention alpha matting model.

    An LSTM attends over a saliency map and repeatedly emits an image
    location; a stroke simulator (stroke.label_for_batches) refines the
    trimap around that location, and a 1x1-conv "matsolver" re-estimates
    the alpha matte.  Training uses a REINFORCE-style loss over the
    per-step improvement in matting MSE.
    '''

    def __init__(self, batch_size=1, mode='Training', image_width=512, image_height=512):
        '''
        Args:
             batch_size: images per step (also sizes the LSTM state)
             mode: 'Training' feeds ground-truth alphas; anything else is
                   treated as testing
             image_width, image_height: input resolution in pixels
        '''
        self.time_steps = 20
        self.batch_size = batch_size
        self.image_height = image_height
        self.image_width = image_width
        # Bug fix: use integer division — these values feed placeholder
        # shapes, which must be ints (plain `/` yields floats on Python 3).
        self.dense_height = self.image_height // 4
        self.dense_width = self.image_width // 4
        self.image_channels = 3
        self.dense_channels = self.image_channels * 16
        self.alpha_channels = 1
        self.win_img = 0.05                       # glimpse window as a fraction of image size
        self.window_size = int(self.image_height * self.win_img)
        self.rnn_input_size = 1000                # rnn_input_size is equal to rnn_output_size
        self.loc_dim = 2                          # 2-D location in [-1, 1]
        self.mode = mode
        self.loc_mean_list = []
        self.loc_list = []
        self.estimated_alpha_list = []
        self.lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.rnn_input_size)
        self.input_images = tf.placeholder(tf.float32, [None, self.image_height, self.image_width, 3], name='ActiveModel_input_images_placeholder')
        self.dense = tf.placeholder(tf.float32, [None, self.dense_height, self.dense_width, self.dense_channels], name='dense_information_placeholder')
        self.dense_mask = tf.placeholder(tf.float32, [None, self.image_height, self.image_width, self.image_channels], name='dense_mask_placeholder')

        # The trimap starts as the raw image and is refined step by step.
        self.trimaps = self.input_images
        if mode == 'Training':
            self.alphaMatte_gt = tf.placeholder(tf.float32, [None, self.image_height, self.image_width, self.alpha_channels], name='input_alphaGT_placeholder')
            self.alphas = tf.reshape(self.alphaMatte_gt, [-1, self.image_height, self.image_width, self.alpha_channels])
        else:
            # Bug fix: the testing paths below reference self.alphas, which
            # the original only defined in Training mode (AttributeError).
            # Pass zeros instead — NOTE(review): assumes label_for_batches
            # ignores the alpha argument in 'Testing' mode; confirm in
            # stroke.py.
            self.alphas = tf.zeros_like(self.input_images[:, :, :, :self.alpha_channels])

    def location_decoder(self, input_vector, name='location_decoding'):
        '''Project an RNN output vector to a glimpse location.

        Args:
             input_vector: [batch_size, rnn_input_size] RNN output
        Returns:
             loc: sampled location in [-1, 1], snapped to the win_img grid
             loc_mean: deterministic mean (used in the REINFORCE term)
        '''
        with tf.name_scope(name):
            loc_std = 0.1
            # Bug fix: wrap in tf.Variable so the projection is trainable;
            # a bare truncated_normal tensor is re-sampled on every run.
            weight = tf.Variable(tf.truncated_normal(shape=[self.rnn_input_size, self.loc_dim], stddev=0.01))
            bias = tf.Variable(tf.zeros([self.loc_dim]))
            loc_mean = tf.clip_by_value(tf.matmul(input_vector, weight) + bias, -1, 1)
            loc = loc_mean + tf.random.normal([1, self.loc_dim], stddev=loc_std)
            loc = tf.clip_by_value(loc, -1, 1)
            # Snap to multiples of win_img so glimpses align to a coarse grid.
            loc = tf.round(loc / self.win_img) * self.win_img
        return loc, loc_mean

    def _glimpse(self, alpha_estimated, locs):
        '''Encode (matte glimpse at locs) + (locs) into one RNN input vector.'''
        glimpse = tf.image.extract_glimpse(alpha_estimated, [self.window_size, self.window_size], locs)
        glimpse2vector = tf.nn.relu(fc_layer(glimpse, self.rnn_input_size, name='glimpse2vector'))
        # Bug fix: tf.Variable so these weights are trainable (see
        # location_decoder for the same issue).
        loc2vector_w = tf.Variable(tf.truncated_normal(shape=[self.loc_dim, self.rnn_input_size], stddev=0.01))
        loc2vector_b = tf.Variable(tf.zeros([self.rnn_input_size]))
        loc2vector = tf.nn.relu(tf.matmul(locs, loc2vector_w) + loc2vector_b)
        return glimpse2vector + loc2vector

    def _loopFunc(self, rnn_output, step):
        '''Loop function for the attention decoder.

        Decodes the next location from the previous RNN output, refines the
        trimap there, re-solves the matte, and returns the next RNN input.
        '''
        locs, locs_mean = self.location_decoder(rnn_output)
        if self.mode == 'Training':
            self.trimaps = tf.py_func(label_for_batches, [self.trimaps, self.alphas, locs, [self.window_size, self.window_size]], tf.float32)
        else:
            self.trimaps = tf.py_func(label_for_batches, [self.trimaps, self.alphas, locs, [self.window_size, self.window_size], 'Testing'], tf.float32)
        # py_func drops the static shape; restore it for downstream ops.
        self.trimaps = tf.reshape(self.trimaps, [-1, self.image_height, self.image_width, self.image_channels])
        # Per-step scope: each time step gets its own solver variables.
        alpha_estimated = matsolver(self.input_images, self.trimaps, name='matsolver_' + str(step))
        self.estimated_alpha_list.append(alpha_estimated)
        self.loc_mean_list.append(locs_mean)
        self.loc_list.append(locs)
        return self._glimpse(alpha_estimated, locs)

    def loglikelihood(self, mean_list, loc_list):
        '''Log-probability of the sampled locations under N(mean, sigma).

        Returns:
             [batch_size, time_steps] tensor (mean over the location dim).
        NOTE(review): sigma is loc_dim (=2.0) rather than the 0.1 sampling
        std used in location_decoder — looks inconsistent; kept as-is.
        '''
        sigma = self.loc_dim
        mu = tf.stack(mean_list)
        sampled = tf.stack(loc_list)
        gaussian = tf.contrib.distributions.Normal(mu, tf.cast(sigma, tf.float32))
        logll = gaussian.log_prob(sampled)
        logll = tf.reduce_mean(logll, 2)
        return tf.transpose(logll)

    def _build_model(self):
        '''Build the recurrent attention graph; populates
        estimated_alpha_list / loc_list / loc_mean_list and rnn_outputs.'''
        dense_vector = fc_layer(self.dense, self.rnn_input_size, name='dense_vector2rnn_init_input')
        attention_mask = tf.reshape(self.dense_mask, [-1, self.image_height * self.image_width, self.image_channels])
        init_state = self.lstm_cell.zero_state(self.batch_size, tf.float32)
        init_input = [dense_vector]
        rnn_init_output, rnn_init_states = tf.contrib.legacy_seq2seq.attention_decoder(decoder_inputs=init_input, initial_state=init_state, attention_states=attention_mask, cell=self.lstm_cell, scope='initialized_lstm', initial_state_attention=True)

        first_loc, _ = self.location_decoder(rnn_init_output[0])
        if self.mode == 'Training':
            self.trimaps = tf.py_func(label_for_batches, [self.trimaps, self.alphas, first_loc, [self.window_size, self.window_size]], tf.float32)
        else:
            # Bug fix: the original passed the undefined name `locs` here.
            self.trimaps = tf.py_func(label_for_batches, [self.trimaps, self.alphas, first_loc, [self.window_size, self.window_size], 'Testing'], tf.float32)
        self.trimaps = tf.reshape(self.trimaps, [-1, self.image_height, self.image_width, self.image_channels])
        # Bug fix: use the same scope name in both modes so testing restores
        # the variables training saved (original used 'matsolver_' vs
        # 'matsolver_1st').
        alpha_estimated = matsolver(self.input_images, self.trimaps, name='matsolver_1st')
        self.estimated_alpha_list.append(alpha_estimated)
        first_vec = self._glimpse(alpha_estimated, first_loc)
        rnn_inputs = [first_vec]
        # Dummy entries: _loopFunc supplies the actual inputs at each step.
        rnn_inputs.extend([0] * self.time_steps)
        rnn_outputs, rnn_states = tf.contrib.legacy_seq2seq.attention_decoder(decoder_inputs=rnn_inputs, initial_state=rnn_init_states, attention_states=attention_mask, cell=self.lstm_cell, loop_function=self._loopFunc, scope='time_step_lstm', initial_state_attention=True)

        self.rnn_outputs = rnn_outputs

    def _build_train_op(self):
        '''REINFORCE-style training op.

        reward_t = decrease in alpha MSE from step t to t+1; a per-step
        baseline network reduces variance; the location log-likelihood is
        weighted by the (stop-gradient) advantage.
        '''
        baselines = []
        for output in self.rnn_outputs[1:]:
            baselines.append(baseline_network(output, 1))   # [time_steps, batch_size]
        baselines = tf.transpose(tf.stack(baselines))       # [batch_size, time_steps]
        rewards = []
        for i in range(self.time_steps):
            alpha_last = self.estimated_alpha_list[i]
            alpha_this = self.estimated_alpha_list[i + 1]
            mse_this = tf.reduce_mean(tf.square(alpha_this - self.alphas), reduction_indices=[1, 2])
            mse_last = tf.reduce_mean(tf.square(alpha_last - self.alphas), reduction_indices=[1, 2])
            rewards.append(mse_last - mse_this)             # positive = matte improved

        rewards = tf.transpose(tf.stack(rewards))
        loc_compute = tf.stack(self.loc_list)
        loc_avg = tf.reduce_mean(loc_compute)
        loc_var = tf.reduce_mean(tf.square(tf.reduce_mean(loc_compute, reduction_indices=[1, 2]) - loc_avg))
        tf.summary.scalar('loc_var', loc_var)
        logll = self.loglikelihood(self.loc_mean_list, self.loc_list)
        advs = rewards - tf.stop_gradient(baselines)
        logllratio = tf.reduce_mean(logll * advs)
        # Bug fix: the original averaged only the LAST loop iteration's
        # reward (`tf.reduce_mean(reward)` on the loop variable, unused
        # afterwards); track the mean over all steps instead.
        self.reward = tf.reduce_mean(rewards)
        tf.summary.scalar('reward', self.reward)
        baseline_mse = tf.reduce_mean(tf.square(rewards - baselines))
        # NOTE(review): subtracting loc_var ENCOURAGES location spread —
        # kept as in the original; confirm this sign is intended.
        self.loss = -logllratio + baseline_mse - loc_var
        tf.summary.scalar('Loss', self.loss)
        self.global_step = tf.train.get_or_create_global_step()
        self.lrn_rate = tf.train.exponential_decay(learning_rate=1e-3,
                                                   global_step=self.global_step,
                                                   decay_steps=10000,
                                                   decay_rate=0.98,
                                                   staircase=True)
        tf.summary.scalar('learning_rate', self.lrn_rate)
        optimizer = tf.train.AdamOptimizer(learning_rate=self.lrn_rate)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = optimizer.minimize(self.loss, global_step=self.global_step)

    def build_graph(self):
        '''Build the model, the training op, and merge all summaries.'''
        self._build_model()
        self._build_train_op()
        self.merged_summary = tf.summary.merge_all()



def train(sess, ckpt_dir='./net/', fromScratch=False, save_steps=500, mode='Training', training_step=5, batch_size=1):
    '''Train ActiveModel on the images found under ./data.

    Args:
         sess: an open tf.Session
         ckpt_dir: directory for checkpoints and TensorBoard logs
         fromScratch: if False, restore the latest checkpoint when present
         save_steps: checkpoint every this many global steps
         mode: passed through to ActiveModel ('Training')
         training_step: number of epochs over the dataset
         batch_size: images per batch
    '''
    images_dirs, alphas_dirs = get_data('data')
    model = ActiveModel(mode=mode)
    model.build_graph()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    train_writer = tf.summary.FileWriter(ckpt_dir + '/log', sess.graph)
    if not fromScratch:
        ckpt = tf.train.latest_checkpoint(ckpt_dir)
        if ckpt:
            saver.restore(sess, ckpt)
        else:
            print('Cannot find ckpt File, training from scratch...')
    else:
        print('Training from scratch...')

    for ep in range(training_step):
        batch_idxs = len(images_dirs) // batch_size
        print('epoch=', ep)
        for idx in range(batch_idxs):
            batch_images_dir = images_dirs[idx * batch_size:(idx + 1) * batch_size]
            batch_labels_dir = alphas_dirs[idx * batch_size:(idx + 1) * batch_size]
            batch_images, batch_alphas, batch_trimaps = process_data(batch_images_dir, batch_labels_dir)
            dense_vector, saliency_map = extract_feat(batch_images_dir)
            feed_dict = {model.input_images: batch_images,
                         model.alphaMatte_gt: batch_alphas,
                         model.dense: dense_vector,
                         model.dense_mask: saliency_map}
            # Bug fix: fetch the summary in the SAME run as the train op.
            # The original issued a second sess.run for merged_summary,
            # re-executing the whole graph (including the py_func stroke
            # labelling) once more per batch, so the summary did not even
            # describe the step that was trained.  Also stop clobbering
            # batch_alphas with the fetched predictions.
            batch_loss, step, summary_str, _ = sess.run(
                [model.loss, model.global_step, model.merged_summary, model.train_op],
                feed_dict=feed_dict)
            print('Training at ', step, 'step, the loss is: ', batch_loss)
            train_writer.add_summary(summary_str, step)

            if step % save_steps == 1:
                if not os.path.isdir(ckpt_dir):
                    os.mkdir(ckpt_dir)
                saver.save(sess, os.path.join(ckpt_dir, 'model.ckpt'), global_step=step)
                print('save the ckpt at ', step, ' step')



def infer(sess, image_dir, ckpt_dir='./net/'):
    '''Run a trained model on one image and return the first alpha estimate.

    Args:
         sess: an open tf.Session
         image_dir: path to the input image
         ckpt_dir: directory holding the trained checkpoint
    Returns:
         the first entry of model.estimated_alpha_list as a numpy array
    '''
    image = cv2.imread(image_dir)
    image = cv2.resize(image, (512, 512))
    model = ActiveModel(mode='Testing')
    model._build_model()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    saver = tf.train.Saver()
    saver.restore(sess, ckpt.model_checkpoint_path)
    dense_vector, saliency_map = extract_feat([image_dir])
    # Bug fix: the original fed the undefined name `batch_images`
    # (NameError).  Build the batch from the image loaded above by adding
    # the batch dimension.  NOTE(review): assumes the model takes raw BGR
    # pixel values, matching process_data — confirm in data_helper.py.
    batch_images = np.expand_dims(image.astype(np.float32), axis=0)
    feed = {model.input_images: batch_images, model.dense: dense_vector, model.dense_mask: saliency_map}
    alpha_prediction = sess.run(model.estimated_alpha_list, feed_dict=feed)
    return alpha_prediction[0]


if __name__ == '__main__':
   # Run a full training session; the context manager closes the session
   # exactly as the explicit sess.close() did.
   with tf.Session() as sess:
        train(sess)
   #print(dense_vector.shape)
   #print(saliency_map.shape)

   
   
   #vec=model._loopFunc(tf.cast(tf.reshape(tf.range(1000),[1,1000]),tf.float32))
   #output = model._build_model_()
   #sess.run(tf.global_variables_initializer())
   #prediction = sess.run(output,feed_dict=feed_dict)
   #sess.close()
   #print(np.shape(prediction))
   
   #mask_prediction = saliency_map[0]
   #pred_h, pred_w, pred_c = mask_prediction.shape
   #pred = np.zeros((pred_h,pred_w))
   #pixel_cls = np.argmax(mask_prediction,axis=2)
   #for i in range (pred_h):
   #    for j in range (pred_w):
   #       if pixel_cls[i,j] == 0:
   #          pred[i,j]=0
   #       elif pixel_cls[i,j] == 1:
   #          pred[i,j] =128
   #       elif pixel_cls[i,j] == 2:
   #          pred[i,j]=255
   #       else:
   #          raise Exception('Argmax() has wrong axis!')


   #cv2.imshow('Saliency',pred)
   #cv2.waitKey(0)
   #cv2.destroyAllWindows()
   
             



      
      



