#conda activate OpenLabCluster
import pickle as pkl
import shutil
import sys
sys.path.append('/home/liying_lab/chenxf/ml-project/LILAB-py/lilab/OpenLabCluster_train')
from ucla_function import *
import os, datetime
from os.path import join
from auxiliaryfunctions import read_config,edit_config
import sys
import tensorflow as tf

###### note for each running

## GPU selection: '0' uses the first GPU; '-1' forces CPU-only execution
os.environ["CUDA_VISIBLE_DEVICES"] = '0'  #'0'  ##-1, CPU
##

## project directory containing config.yaml, datasets/ and model outputs
project='/DATA/taoxianming/rat/data/Mix_analysis/SexAgeDay55andzzcWTinAUT_MMFF/result32/olc-iter1-2024-05-23'

##parameters
config_path=join(project,'config.yaml')
config=read_config(config_path)
# checkpoint prefix encodes the architecture (PCA weight, 3 encoder layers, hidden size)
if config['tr_modelType']=='seq2seq':
    modelPrefix='FWPCA0.00_P100_en3_hid'+str(config['hidden_size'])+'_epoch'
elif config['tr_modelType']=='semi_seq2seq':
    modelPrefix='OPCA0.50_P100_en3_hid30_epoch'
else:
    # fail fast: modelPrefix would otherwise be unbound and crash much later
    raise ValueError('unsupported tr_modelType: %r' % config['tr_modelType'])

###Seq2Seq related part
data_path=join(project,'datasets')
resPath=config['output_path']

## train/test splits feed the mini-batch sampler of the training loop below;
## load_all provides the full (un-split) feature set used later for latent extraction.
## NOTE: these assignments were previously commented out, which made the training
## loop crash with NameError on dsamp_train/dsamp_test/lr/iterations.
dsamp_train,dsamp_test,tr_fea_xyz,tr_seq_len,tr_keys,te_fea_xyz,te_seq_len,te_keys = load_train_test(project)
_,featsUse,seq_lenUse,_=load_all(project)

##### Hyperparameters
max_seq_len,input_size = featsUse[0].shape  # e.g. 24 frames x 41 features
rnn_size=config['hidden_size']  #30--60
batch_size=config['batch_size']
lr = config['learning_rate']
decay_rate=0.95  # applied every 50 steps via model.learning_rate_decay_op
iterations=3000  # hard cap on training steps; early stopping usually breaks first

##path for decoder sequences
modelDec_path=join(project,'models_Dec')
os.makedirs(modelDec_path,exist_ok=True)
# record the effective hyperparameters in the project config for downstream steps
edit_config(config_path,{'multi_epoch':iterations,'batch_size':batch_size,'decay_rate':decay_rate,'modelDec_Path':modelDec_path})


#############Training FW
## Build the graph first, then open the session: the original code created a
## tf.Session() *before* tf.reset_default_graph(), binding it to the discarded
## graph and leaking it when get_session() replaced it.
tf.reset_default_graph()
# FW: fixed-weight seq2seq model
if config['fix_weight']:
    model = Seq2SeqModelFW(max_seq_len, input_size, rnn_size, batch_size, lr)#,decay_rate=decay_rate)
else:
    # fail fast: the training loop below requires `model` to exist
    raise ValueError('fix_weight must be enabled for FW training')

##training from initialization
sess = get_session()
sess.run(tf.global_variables_initializer())
##retraining
## FW no-labeling training loop with early stopping:
##  - keep only the best checkpoint (lowest test loss with train/test gap < 5%)
##  - stop once the test loss has dropped far enough (or the step cap is hit)
loss=[]
te_loss=[]
te_loss_min=2000  # sentinel larger than any expected initial loss
modelPath=None    # path of the best encoder checkpoint saved so far
epochUse=None     # step at which the decoder checkpoint was saved
start_time = timeit.default_timer()

##training and saving
for i in range(0,iterations+1):
    # one gradient step on a random training mini-batch
    encoder_inputs_xyz,decoder_inputs_xyz,seq_len_enc = mini_batch(dsamp_train, seq_len=max_seq_len, input_size=input_size, batch_size=batch_size)
    _,train_loss = model.step(sess,encoder_inputs_xyz,decoder_inputs_xyz,seq_len_enc, False)
    loss.append(train_loss)
    # evaluate on a random test mini-batch
    te_encoder_inputs_xyz,te_decoder_inputs_xyz,te_seq_len_enc = mini_batch(dsamp_test, seq_len=max_seq_len, input_size=input_size, batch_size=batch_size)
    test_loss = model.predict(sess,te_encoder_inputs_xyz,te_decoder_inputs_xyz,te_seq_len_enc, False)
    te_loss.append(test_loss)
    # save when test loss improves AND train/test losses agree within 5%
    if test_loss<te_loss_min and abs(test_loss-train_loss)/test_loss<0.05: #0.1: 0.01 ##0.05 test_loss and train_loss both small
        print("step {0}:  train loss:{1:.4f}  ".format(i, train_loss),end='')
        print("step {0}:  test loss:{1:.4f}\n".format(i, test_loss),end='')
        ##only keep 1 model
        ##save the better model
        modelPath=join(project,'models',modelPrefix+str(i))
        model.saver.save(sess,modelPath)
        te_loss_min=test_loss
        print('... save\n')
        # early stop: loss dropped below 16% of its start (after warm-up) or too many steps
        if (test_loss/te_loss[0]<0.16 and i>230) or i>400:
            model.saver_multi.save(sess,join(modelDec_path,'DecSeq-'+modelPrefix+str(i)))
            epochUse=i
            edit_config(config_path,{'epochUse':epochUse})
            break
    if i%30 == 0:
        # periodic progress report with wall-clock timing
        print("step {0}:  train loss:{1:.4f}  ".format(i, train_loss),end='')
        print("step {0}:  test loss:{1:.4f}".format(i, test_loss),end='')
        end_time = timeit.default_timer()
        print(" using {:.2f} sec".format(end_time-start_time))
        start_time = end_time
        print('\n')
    if i%50 == 0:
        sess.run(model.learning_rate_decay_op)

## Fallback: if early stopping never fired, `epochUse`/`modelPath` would be
## unbound and the downstream latent-extraction step would have no checkpoint.
if epochUse is None:
    epochUse = iterations
    model.saver_multi.save(sess,join(modelDec_path,'DecSeq-'+modelPrefix+str(epochUse)))
    edit_config(config_path,{'epochUse':epochUse})
if modelPath is None:
    modelPath = join(project,'models',modelPrefix+str(epochUse))
    model.saver.save(sess,modelPath)


##record the selected model, its loss curves, and archive the config
##(data separability is used to choose the training epoch number)
sess.close()
loss_file = modelPath + '.loss'
models_dir = join(project, 'models')
edit_config(config_path, {'tr_modelName': modelPath})
np.savez(loss_file, loss=loss, te_loss=te_loss)
shutil.copy(config_path, models_dir)
print('epochUse:%s' % (epochUse,))

##plot the training/test loss curves and save the figure next to the checkpoint
import matplotlib.pyplot as plt
loss_fig_path = modelPath + '.loss.jpg'
print(loss_fig_path)
fig = plt.figure(figsize=(10, 10), dpi=300)
steps = range(len(loss))
plt.plot(steps, loss, label="Training loss")
plt.plot(steps, te_loss, color='red', linewidth=1, linestyle='--', label="Test loss")
plt.title(u'train_loss and test_loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(0.95, 0.95), loc=1, borderaxespad=0.)
fig.savefig(loss_fig_path)



##calculate enc and dec latent sequences for the chosen epoch's checkpoint
tf.reset_default_graph()
model = Seq2SeqModelFW(max_seq_len, input_size,rnn_size, batch_size, lr)
sess = get_session()
for i in [epochUse]:
    ##load model and collect encoder/decoder latent sequences over all data
    mdPath=join(modelDec_path,'DecSeq-'+modelPrefix+str(i))
    model.saver.restore(sess,mdPath)
    enc_seqs = []
    dec_seqs = []
    for feature_xyz,seq_len in mini_batch_feats(featsUse,seq_lenUse,batch_size):
        enc_seq,dec_seq=get_encdec_seqs(model, sess, feature_xyz, len(feature_xyz), seq_len)
        # BUG FIX: encoder latents were computed but never collected, so the
        # pickle's 'enc_seqs' entry was always an empty array
        enc_seqs.extend(enc_seq)
        dec_seqs.extend(dec_seq)
    # `with` ensures the file handle is closed (was a bare open() before)
    with open(mdPath+'-EncDecSeqs_alldata.pkl','wb') as f:
        pkl.dump({'enc_seqs':np.array(enc_seqs),'dec_seqs':np.array(dec_seqs)},f,protocol=4)
sess.close()
####plot trajectory of latent PCs of the Decoder sequences

##
import hypertools as hyp
import matplotlib.pyplot as plt
##
clip_long=24  # frames per clip
plotN=200     # number of clips to plot
## one colour per clip; frame 0 of every clip is coloured 0 to mark clip starts
## (was hard-coded 24 here -- now kept in sync with clip_long)
colors=[]
for i in range(plotN): colors.extend([i]*clip_long)
for i in range(plotN): colors[i*clip_long]=0
##project decoder latents onto 3 PCs and plot the first plotN clips in 3D
for i in [epochUse]:
    mdFile=join(modelDec_path,'DecSeq-'+modelPrefix+str(i)+'-EncDecSeqs_alldata.pkl')
    with open(mdFile,'rb') as f:
        seqs=pkl.load(f)
    ##decoder data
    dec_seqs=seqs['dec_seqs']
    stateSeq_N=dec_seqs.shape[0]
    # assumes the decoder latent dimension is 60 (consistent with hid30) -- TODO confirm
    # if hidden_size differs; frames-per-clip now uses clip_long instead of a literal 24
    dps=pca(dec_seqs.reshape(stateSeq_N*clip_long,60),3)
    dpsP=dps[:clip_long*plotN,:]
    hyp.plot(dpsP, '.',hue = colors,size=[13,13])
    plt.savefig(mdFile.replace('.pkl','-')+str(plotN)+'_dec.jpg')
    plt.close()  # release the figure so repeated runs don't accumulate memory



