#!/usr/bin/env python
# coding: utf-8

#from IPython.core.display import display, HTML
#display(HTML("<style>.container { width:100% !important; }</style>"))
import math
import json
import numpy as np
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
import random
from sklearn.neighbors import KNeighborsClassifier
import timeit
import scipy.io as sio
import glob
from os.path import join
import h5py
#tf.compat.v1.disable_eager_execution()


from sklearn import preprocessing,decomposition
##
def zscore(data):
    """Standardize each column of `data` to zero mean and unit variance."""
    scaler = preprocessing.StandardScaler()
    return scaler.fit_transform(data)


def zspca(data, n_components):
    """Z-score the data, then project it onto the top n_components PCs."""
    standardized = zscore(data)
    model = decomposition.PCA(n_components=n_components).fit(standardized)
    return model.transform(standardized)

def pca(data, n_components):
    """Project `data` onto its top n_components principal components."""
    model = decomposition.PCA(n_components=n_components).fit(data)
    return model.transform(data)

def pca_proper(data, n_components):
    """PCA after explicitly mean-centering each column first.

    (sklearn's PCA also centers internally, so the explicit subtraction is
    redundant but harmless — kept for parity with the original.)
    """
    centered = data - np.mean(data, axis=0)
    model = decomposition.PCA(n_components=n_components).fit(centered)
    return model.transform(centered)


##load data
def load_data(path, mids):
  """Load one .mat file per id and keep variables whose name contains 'segLength'.

  For each id in `mids`, the first file matching `<path>/<id>*.mat` is loaded.
  Variables whose name contains 'segLength' are collected into `data_all`;
  everything else (including loadmat metadata such as __header__, __version__,
  __globals__) is only printed.

  Returns:
    [data_all, keys]: the kept arrays keyed by variable name, and the key
    insertion order.
  """
  data_all = {}
  keys = []
  for mid in mids:
    mat_file = glob.glob(join(path, mid + '*.mat'))[0]
    contents = sio.loadmat(mat_file)
    print(len(contents))
    for key, value in contents.items():
      if 'segLength' in key:
         data_all[key] = value
         keys.append(key)
      else:
         # Skipped: loadmat metadata and any other variables.
         print(key)
    print(len(data_all))
  return [data_all, keys]

def min_max_normalize(data_all, keys, pdn=2063):
  """Scale two feature groups to [-1, 1] independently, in place.

  Columns [:pdn] (point-distance / single-rat features) and columns [pdn:]
  (ego3D / interaction features) are min-max normalized separately, using
  the global min/max computed over ALL segments listed in `keys`.

  Args:
    data_all: dict mapping key -> 2-D array (frames x features); the arrays
      are mutated in place.
    keys: the keys of data_all to normalize.
    pdn: column index splitting the two feature groups. Defaults to 2063
      (the original hard-coded boundary), now a parameter so other feature
      layouts can reuse this function.

  Returns:
    [data_all, keys] with the arrays normalized.
  """
  originals = [data_all[key].copy() for key in keys]
  stacked = np.concatenate(originals, axis=0)
  pd_min, pd_max = np.min(stacked[:, :pdn]), np.max(stacked[:, :pdn])
  eg_min, eg_max = np.min(stacked[:, pdn:]), np.max(stacked[:, pdn:])
  for i, key in enumerate(keys):
      data_all[key][:, :pdn] = 2 * (originals[i][:, :pdn] - pd_min) / (pd_max - pd_min) - 1
      data_all[key][:, pdn:] = 2 * (originals[i][:, pdn:] - eg_min) / (eg_max - eg_min) - 1
  return [data_all, keys]



def zscore_normalize(data_all, keys):
  """Z-score all segments jointly, then write each slice back under its key.

  All segments are stacked, standardized together (via `zscore`), and split
  back by position.

  NOTE(review): the split assumes every segment has the same number of frames
  as the first one (`data_all[keys[0]]`) — confirm against the data layout.
  """
  seg_len = data_all[keys[0]].shape[0]
  stacked = np.concatenate([data_all[key].copy() for key in keys], axis=0)
  standardized = zscore(stacked)
  for i, key in enumerate(keys):
      data_all[key] = standardized[i * seg_len:(i + 1) * seg_len]
  return [data_all, keys]


##random split train and test
#80% vs 20%
def split_train_test(data_all, keys):
  """Deterministically split `data_all` into 80% train / 20% test dicts.

  The key order is shuffled with a fixed seed (1000), so repeated calls with
  the same keys always produce the same split.
  """
  random.seed(1000)
  order = list(range(len(keys)))
  random.shuffle(order)
  n_train = int(len(keys) * 0.8)
  train_data = {keys[i]: data_all[keys[i]] for i in order[:n_train]}
  test_data = {keys[i]: data_all[keys[i]] for i in order[n_train:]}
  return [train_data, test_data]


def get_feats(raw_data):
    """Unzip a dict of sequences into parallel lists.

    Returns (arrays, lengths, keys), where lengths[i] is the frame count
    (shape[0]) of arrays[i] and keys preserves the dict's iteration order.
    """
    fea_xyz, seq_len, keys = [], [], []
    for key, val in raw_data.items():
        keys.append(key)
        fea_xyz.append(val)
        seq_len.append(val.shape[0])
    return fea_xyz, seq_len, keys


def preprocess(path, mids):
    """Load segments, split 80/20 train/test, and unpack per-split features.

    Normalization is intentionally disabled here; `min_max_normalize` (more
    suitable for the network) and `zscore_normalize` (more robust) can be
    inserted between loading and splitting if needed.
    """
    data_all, keys = load_data(path, mids)
    train_data, test_data = split_train_test(data_all, keys)
    tr_fea_xyz, tr_seq_len, tr_keys = get_feats(train_data)
    te_fea_xyz, te_seq_len, te_keys = get_feats(test_data)
    return train_data, test_data, tr_fea_xyz, tr_seq_len, tr_keys, te_fea_xyz, te_seq_len, te_keys

def get_feats_olc(project, dataset_name):
    """Load features for an OpenLabCluster project split.

    `dataset_name` is 'train', 'test', or '' for the full dataset. Clip names
    are read from videos/clipNames[_<split>].txt; features come from
    datasets/data[_<split>].h5 whose datasets are named '0','1',... in the
    clip-name file's line order (not necessarily the clip-name order).

    NOTE(review): the h5py file handle is never closed on purpose — the
    returned values are live h5py datasets, and closing the file would
    invalidate them.

    Returns (data, feats, seq_len, keys).
    """
    if dataset_name == '':
        clip_path = join(project, 'videos/clipNames.txt')
        h5_path = join(project, 'datasets/data.h5')
    else:
        clip_path = join(project, 'videos/clipNames_' + dataset_name + '.txt')
        h5_path = join(project, 'datasets/data_' + dataset_name + '.h5')
    keys = open(clip_path).read().strip().split('\n')
    fid = h5py.File(h5_path, 'r')
    data, feats, seq_len = {}, [], []
    for ki, key in enumerate(keys):
        val = fid[str(ki)]
        data[key] = val
        feats.append(val)
        seq_len.append(val.shape[0])
    return data, feats, seq_len, keys


def load_train_test(project):
    """Load both the 'train' and 'test' splits of an OpenLabCluster project."""
    train_data, tr_feats, tr_seq_len, tr_keys = get_feats_olc(project, 'train')
    test_data, te_feats, te_seq_len, te_keys = get_feats_olc(project, 'test')
    return train_data, test_data, tr_feats, tr_seq_len, tr_keys, te_feats, te_seq_len, te_keys

def load_all(project):
    """Load the full (unsplit) dataset of an OpenLabCluster project."""
    return get_feats_olc(project, '')

# # Seq2Seq class
# # FS class
from tensorflow.python.ops.rnn import _transpose_batch_time
# # FW class
class Seq2SeqModelFW(object):
    """Sequence autoencoder: 3-layer bidirectional GRU encoder + GRU decoder.

    The concatenated forward/backward final encoder states initialize a
    unidirectional GRU decoder that receives all-zero inputs and must
    reconstruct the input sequence through a linear layer; the loss is a
    sequence-masked mean absolute error. ``self.knn_state`` (the concatenated
    final encoder state) is the learned feature used for kNN evaluation.
    """
    def __init__(self, max_seq_len, input_size, rnn_size, batch_size, lr, train_keep_prob=1.0,decay_rate=0.95,dtype=tf.float32):
        """Build the TF1 graph.

        Args:
          max_seq_len: padded time dimension of the input placeholders.
          input_size: per-frame feature dimension.
          rnn_size: per-direction GRU size; stored doubled (rnn_size*2) so the
            decoder state matches the concatenated fw+bw encoder state.
          batch_size: default for the batch-size placeholder (overridable at feed time).
          lr: initial learning rate; decayed via `learning_rate_decay_op`.
          train_keep_prob: default for the keep-prob placeholder (created but
            not applied to any layer in this graph).
          decay_rate: factor applied by `learning_rate_decay_op`.
          dtype: dtype of the learning-rate variable and input placeholders.
        """
        self.max_seq_len = max_seq_len
        self.rnn_size = rnn_size*2
        self.batch_size = tf.placeholder_with_default(batch_size,shape=())
        self.input_size = input_size
        self.lr = tf.Variable( float(lr), trainable=False, dtype=dtype)
        self.learning_rate_decay_op = self.lr.assign( self.lr * decay_rate)
        self.keep_prob = tf.placeholder_with_default(train_keep_prob,shape=())
        self.global_step = tf.Variable(0, trainable=False)
        print('rnn_size = {0}'.format(rnn_size))
        
        with tf.variable_scope("inputs"):
            self.enc_xyz = tf.placeholder(dtype, shape=[None, self.max_seq_len, input_size], name='enc_xyz')
            self.dec_xyz = tf.placeholder(dtype, shape=[None, self.max_seq_len, input_size], name='dec_xyz')
            self.seq_len = tf.placeholder(tf.int32,[None])
            # 1.0 for frames with any non-zero feature, 0.0 for zero padding.
            mask = tf.sign(tf.reduce_max(tf.abs(self.enc_xyz), 2))
        with tf.variable_scope("prediction"):
            with tf.variable_scope("encoder"):
                with tf.variable_scope("encoder_xyz",reuse=tf.AUTO_REUSE):
                    # Two 3-layer GRU stacks, each of size rnn_size//2 per direction.
                    cell_fw_xyz = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(self.rnn_size//2) for i in range(3)])
                    cell_bw_xyz = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(self.rnn_size//2) for i in range(3)])
                    data_all,tuple_xyz = tf.nn.bidirectional_dynamic_rnn(cell_fw_xyz,cell_bw_xyz,self.enc_xyz,dtype=tf.float32,sequence_length=self.seq_len)
                    #print(tuple_xyz)  ##last 3 layers
                    #print(tuple_xyz[0]) #fw last frame of 3 layers [0 (None,30),1 (None,30),2 (None,30)]
                    #print(tuple_xyz[1]) #bw last frame of 3 layers [0 (None,30),1 (None,30),2 (None,30)]
                    self.bi_xyz_h_Seq = tf.concat((data_all[0],data_all[1]),-1)# data_all: all sequences of fw and bw [fw (None,24,30), bw(None, 24,30)]
                    #print(tuple_xyz[1][-1].shape) #bw last
                    #print(tuple_xyz[0][-1].shape) #fw last
                    #print(tuple_xyz[1][-1].shape) #bw last
                    # Concatenate the top-layer final fw and bw states -> (None, rnn_size).
                    bi_xyz_h = tf.concat((tuple_xyz[0][-1],tuple_xyz[1][-1]),-1)
                    #print(bi_xyz_h)
                    #self.enc_states = _ #features [batch,time,2048]
                    self.bi_xyz_h = bi_xyz_h
            # Learned feature exposed for kNN evaluation (see get_feature below).
            self.knn_state = self.bi_xyz_h
            with tf.variable_scope("decoder"):
                with tf.variable_scope("decoder_xyz",reuse=tf.AUTO_REUSE):
                    cell_xyz = tf.nn.rnn_cell.GRUCell(self.rnn_size)
                    # Decoder input is all zeros: all information must flow
                    # through the initial state (the encoder feature).
                    self.dec_outputs_xyz, dec_final_state_xyz = tf.nn.dynamic_rnn(cell_xyz, tf.zeros_like(self.dec_xyz), sequence_length=self.seq_len, initial_state = self.bi_xyz_h,dtype = tf.float32)
                    ## self.dec_outputs_xyz: sequence (None,24,60); 
                    ## dec_final_state_xyz: last frame of sequence (None,60)
                    print(0)  # debug marker
            def loss_with_mask(pred,gt,mask):
                """Per-sequence mean absolute error over unmasked frames, averaged over the batch."""
                loss = tf.reduce_sum(tf.abs(pred-gt),2)*mask
                loss = tf.reduce_sum(loss,1)
                loss /= tf.reduce_sum(mask,1)
                loss = tf.reduce_mean(loss)
                return loss
            with tf.variable_scope("pred_xyz",reuse=tf.AUTO_REUSE):
                #FC = tf.layers.Dense(units=60,activation=None, name="pred_skel")
                # Linear projection from decoder output back to input_size features.
                FC = tf.layers.Dense(units=input_size,activation=None, name="pred_skel")
                pred_xyz2xyz = FC(self.dec_outputs_xyz)
                #print(self.dec_outputs_xyz.shape)  ##24,60
                #print(pred_xyz2xyz.shape)  ##24,41
            # Reconstruction target is the encoder input itself (autoencoding).
            self.loss = loss_with_mask(pred_xyz2xyz,self.enc_xyz,mask)
        ##trainable parameters
        # NOTE(review): pred_vars is collected but never passed to the optimizer;
        # compute_gradients below uses ALL trainable variables by default.
        self.pred_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "prediction/encoder")  ##define trainable parameters, defaults
        #####self.fc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "prediction/pred_xyz")
        ##params = self.pred_vars ####+ self.fc_vars
        #opt = tf.compat.v1.train.AdamOptimizer(self.lr)  ##.minimize(self.loss,var_list=params) default var_list~GraphKeys.TRAINABLE_VARIABLES
        opt = tf.train.AdamOptimizer(self.lr) 
        # Adam with global-norm gradient clipping at 25.
        gradients, params = zip(*opt.compute_gradients(self.loss))
        clipped_gradients, norm = tf.clip_by_global_norm(gradients, 25)
        self.updates = opt.apply_gradients(zip(clipped_gradients,params),global_step = self.global_step)
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
        self.saver_multi = tf.train.Saver(tf.global_variables(), max_to_keep=100)
    ##
    def predict(self, session, encoder_inputs_xyz,decoder_inputs_xyz,seq_len,forward_only):
        """Evaluate the reconstruction loss for a batch without updating weights.

        Returns the scalar loss; returns None when forward_only is True
        (only the False path is implemented).
        """
        if not forward_only:
            input_feed = {self.enc_xyz: encoder_inputs_xyz,
                          self.dec_xyz: decoder_inputs_xyz,
                          self.seq_len: seq_len}
            output_feed = [self.loss]
            outputs = session.run(output_feed, input_feed)
            return outputs[0]
    
    def step(self, session, encoder_inputs_xyz,decoder_inputs_xyz,seq_len,forward_only):
        """Run one training step (apply gradients) and return (update_result, loss).

        Returns None when forward_only is True (only the False path is
        implemented).
        """
        if not forward_only:
            input_feed = {self.enc_xyz: encoder_inputs_xyz,
                          self.dec_xyz: decoder_inputs_xyz,
                          self.seq_len: seq_len}
            output_feed = [self.updates,self.loss]
            outputs = session.run(output_feed, input_feed)
            return outputs[0], outputs[1]

    def get_encdec_seqs_inside(self, session, encoder_inputs_xyz,decoder_inputs_xyz,seq_len,forward_only):
        """Return (encoder output sequence, decoder output sequence) for a batch.

        Returns None when forward_only is True (only the False path is
        implemented).
        """
        if not forward_only:
            input_feed = {self.enc_xyz: encoder_inputs_xyz,
                          self.dec_xyz: decoder_inputs_xyz,
                          self.seq_len: seq_len}
            output_feed = [self.bi_xyz_h_Seq,self.dec_outputs_xyz]
            outputs = session.run(output_feed, input_feed)
            return outputs[0], outputs[1]

# ## Linear wrapper for FS
#from tensorflow.contrib.rnn.python.ops.core_rnn_cell import RNNCell
# tf.contrib was removed in TF2; BasicRNNCell stands in as the RNNCell base
# class used by the wrapper classes below (for inheritance + isinstance checks).
RNNCell=tf.nn.rnn_cell.BasicRNNCell
##from tensorflow.keras.rnn.python.ops.core_rnn_cell import RNNCell
class LinearSpaceDecoderWrapper(RNNCell):
  """Operator adding a learned linear projection to an RNN cell's output."""
  def __init__(self, cell, output_size):
    """Create a cell whose output is projected through a linear layer.

    Args:
      cell: an RNNCell. Its output is projected to `output_size`.
      output_size: dimension of the projected output.
    Raises:
      TypeError: if cell is not an RNNCell.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not a RNNCell.")

    self._cell = cell

    print( 'output_size = {0}'.format(output_size) )
    print( ' state_size = {0}'.format(self._cell.state_size) )

    # Tuple if multi-rnn: project from the top layer's state size.
    if isinstance(self._cell.state_size,tuple):

      # Fine if GRU...
      insize = self._cell.state_size[-1]

      # LSTMStateTuple if LSTM.
      # Fix: the bare name LSTMStateTuple was never imported in this file and
      # raised NameError whenever this branch was reached; reference it via
      # tf.nn.rnn_cell instead.
      if isinstance( insize, tf.nn.rnn_cell.LSTMStateTuple ):
        insize = insize.h

    else:
      # Fine if not multi-rnn
      insize = self._cell.state_size

    # Projection weights, uniformly initialized in [-0.04, 0.04].
    self.w_out = tf.get_variable("proj_w_out",
        [insize, output_size],
        dtype=tf.float32,
        initializer=tf.random_uniform_initializer(minval=-0.04, maxval=0.04))
    self.b_out = tf.get_variable("proj_b_out", [output_size],
        dtype=tf.float32,
        initializer=tf.random_uniform_initializer(minval=-0.04, maxval=0.04))

    self.linear_output_size = output_size


  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self.linear_output_size

  def __call__(self, inputs, state, scope=None):
    """Step the wrapped cell, then apply the linear projection to its output."""

    # Run the rnn as usual
    output, new_state = self._cell(inputs, state, scope)

    # Apply the projection to every output
    output = tf.matmul(output, self.w_out) + self.b_out

    return output, new_state


# ## Residual Wrapper for FS
class ResidualWrapper(RNNCell):
  """RNN cell wrapper that adds the step input to the wrapped cell's output."""

  def __init__(self, cell):
    """Wrap `cell` with a residual (skip) connection.

    Args:
      cell: an RNNCell whose output has the same shape as its input.

    Raises:
      TypeError: if cell is not an RNNCell.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not a RNNCell.")
    self._cell = cell

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Step the wrapped cell, then add `inputs` to its output."""
    cell_output, next_state = self._cell(inputs, state, scope)
    residual_output = tf.add(cell_output, inputs)
    return residual_output, next_state


# # Get learned Features for knn evaluation
def get_feature(model, session, encoder_inputs_xyz, batch_size, seq_len):
    """Run the encoder and return the learned feature (model.knn_state) for a batch."""
    feed = {
        model.enc_xyz: encoder_inputs_xyz,
        model.dec_xyz: encoder_inputs_xyz,
        model.seq_len: seq_len,
        model.batch_size: batch_size,
    }
    fetched = session.run([model.knn_state], feed)
    return fetched[0]


# # Get sequences inside models
# NOTE(review): this is a byte-identical duplicate of get_feature defined
# earlier in the file; at import time this definition shadows the first.
# The section header above says "Get sequences inside models", so this copy
# was presumably meant to be a different function — confirm and remove one.
def get_feature(model, session, encoder_inputs_xyz, batch_size, seq_len):
    input_feed = {model.enc_xyz: encoder_inputs_xyz,
                  model.dec_xyz: encoder_inputs_xyz,
                  model.seq_len: seq_len, model.batch_size:batch_size}
    output_feed = [model.knn_state]
    outputs = session.run(output_feed, input_feed)
    return outputs[0]


def get_encdec_seqs(model, session, encoder_inputs_xyz, batch_size, seq_len):
    """Fetch the encoder output sequence and decoder output sequence for a batch."""
    feed = {
        model.enc_xyz: encoder_inputs_xyz,
        model.dec_xyz: encoder_inputs_xyz,
        model.seq_len: seq_len,
        model.batch_size: batch_size,
    }
    enc_seq, dec_seq = session.run([model.bi_xyz_h_Seq, model.dec_outputs_xyz], feed)
    return enc_seq, dec_seq

# # Training
def get_session():
    """Create a TF session that grows GPU memory on demand.

    Device placement logging and soft placement are enabled.
    See: https://www.tensorflow.org/tutorials/using_gpu#allowing_gpu_memory_growth
    """
    config = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    return tf.Session(config=config)


# ## Mini batch
def mini_batch(data, seq_len, input_size, batch_size):
    """Sample batch_size sequences (with replacement) and zero-pad to seq_len.

    Args:
      data: dict key -> (frames x input_size) array, frames <= seq_len.
      seq_len: padded time dimension of the batch arrays.
      input_size: per-frame feature dimension.
      batch_size: number of sequences to draw.

    Returns:
      (encoder_inputs_xyz, decoder_inputs_xyz, seq_len_enc): the padded
      encoder batch, an all-zero decoder batch of the same shape (the decoder
      is intentionally fed zeros), and each sample's true frame count.
    """
    encoder_inputs_xyz = np.zeros((batch_size, seq_len, input_size), dtype=float)
    # Decoder inputs stay zero by design; allocated only for shape parity.
    decoder_inputs_xyz = np.zeros((batch_size, seq_len, input_size), dtype=float)
    seq_len_enc = np.zeros((batch_size,), dtype=float)
    keys = list(data.keys())  # hoisted: avoid rebuilding the key list per draw
    for i in range(batch_size):
        data_sel = data[random.choice(keys)]
        encoder_inputs_xyz[i, :data_sel.shape[0], :] = np.copy(data_sel)
        seq_len_enc[i] = data_sel.shape[0]

    return encoder_inputs_xyz, decoder_inputs_xyz, seq_len_enc


# ## Mini batch for evaluating learned features
def mini_batch_feats(feature_xyz, seq_len, batch_size):
    """Yield (features, lengths) chunks of at most batch_size items each."""
    total = len(feature_xyz)
    start = 0
    while start < total:
        end = min(start + batch_size, total)
        yield feature_xyz[start:end], seq_len[start:end]
        start = end


# ## Mini batch for evaluating learned features
def mini_batch_classify(feature_xyz, labels, seq_len, batch_size):
    """Yield (features, labels, lengths) chunks of at most batch_size items each."""
    total = len(feature_xyz)
    start = 0
    while start < total:
        end = min(start + batch_size, total)
        yield feature_xyz[start:end], labels[start:end], seq_len[start:end]
        start = end



# ## Simple structure for AEC
class AEC(object):
    """Fully-connected autoencoder: input -> 1024 -> 512 -> 256 -> 512 -> 1024 -> input.

    Hidden layers use tanh; the reconstruction layer is linear. Trained with
    mean-squared reconstruction error via Adam. `self.fea` (the 256-d
    bottleneck) is the learned feature.
    """
    def __init__(self, input_size, batch_size, lr, dtype=tf.float32):
        """Build the autoencoder graph.

        Args:
          input_size: dimension of each input row.
          batch_size: accepted but unused in the visible graph.
          lr: Adam learning rate.
          dtype: accepted but unused; the placeholder is created as tf.float32.
        """
        self.X = tf.placeholder(dtype=tf.float32, shape=[None, input_size], name='X')

        #encoder
        self.fc1 = tf.layers.dense(self.X,1024,activation='tanh')
        self.fc2 = tf.layers.dense(self.fc1,512,activation='tanh')
        self.fea = tf.layers.dense(self.fc2,256,activation='tanh')

        #decoder
        self.fc3 = tf.layers.dense(self.fea,512,activation='tanh')
        self.fc4 = tf.layers.dense(self.fc3,1024,activation='tanh')
        self.Y = tf.layers.dense(self.fc4,input_size,activation=None)

        # Mean-squared reconstruction error.
        self.loss = tf.reduce_mean(tf.square(self.X-self.Y))
        self.optimizer = tf.train.AdamOptimizer(lr)
        self.opt = self.optimizer.minimize(self.loss)
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
    

    def step_test(self, sess, feature, forward_only=False):
            # Reconstruction loss on `feature` without updating weights.
            # Returns a one-element list (the session.run fetch list result).
            test_loss = sess.run([self.loss],feed_dict={self.X:feature})
            return test_loss


    def step(self, sess, feature, forward_only=False):
        """If forward_only, return the bottleneck feature (as a one-element list);
        otherwise run one training step and return the scalar loss."""
        if forward_only:
            output = sess.run([self.fea],feed_dict={self.X:feature})
            return output
        else:
            _,train_loss = sess.run([self.opt,self.loss],feed_dict={self.X:feature})
            return train_loss


def aec_mini_batch(feature_xyz, batch_size):
    """Yield consecutive slices of feature_xyz with at most batch_size rows each."""
    total = len(feature_xyz)
    for start in range(0, total, batch_size):
        yield feature_xyz[start:min(start + batch_size, total)]



