# -*- coding: utf-8 -*-
"""
Created on Thu Jun 17 09:55:58 2021

@author: shijie
"""
def trymkdir(p):
    """Create directory *p*, silently ignoring any OS-level failure
    (e.g. it already exists, the parent is missing, or permission is denied)."""
    from os import mkdir
    try:
        mkdir(p)
    except OSError:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; filesystem errors remain best-effort as before.
        pass
    
def prepare_workdir():
    """Register all job-level paths and RNAGCN hyper-parameters as module globals."""
    from sys import path as paths

    global ijob, GNNmodel_path, src_path, data_path, index_path, out_path, lr_path, RNAList_path

    # Serial number of this task.
    ijob = 1

    # Source code location (appended to sys.path below so GNN_NET can be imported).
    src_path = "/fsa/home/ww_dengcw/Desktop/GNN_multiprocessing_distrubuted/"

    # pdb files location.
    data_path = "/fsa/home/ww_dengcw/Desktop/RNAS/new_sample1/"

    # Index files location ('compute_index_files.py' produces these).
    index_path = "/fsa/home/ww_dengcw/Desktop/INDEX_FILES/"

    # Per-job output tree.
    out_path = f"/fsa/home/ww_dengcw/Desktop/out/out{ijob}/"
    trymkdir(out_path)

    # Learning-rate override file: rewrite it during training to change lr safely;
    # when absent, lr decays with the original strategy.
    lr_path = out_path + "lr.txt"

    # Where GCN model parameters are checkpointed during training.
    GNNmodel_path = out_path + "GNN_model/"
    trymkdir(GNNmodel_path)

    # RNA name lists (per-process allocation files written by alloc()).
    RNAList_path = out_path + f"out{ijob}/RNA_PART/"

    # Make GNN_NET importable.
    paths.append(src_path)

    # RNAGCN graph/model dimensions.
    global node_dim, edge_dim, max_Natoms, n_neibors, onehots
    node_dim = 1
    edge_dim = 5
    max_Natoms = 1700
    n_neibors = 14
    onehots = 54

# Populate the path/hyper-parameter globals at import time (side effect on import).
prepare_workdir()
    
def register_task_and_traning_parameters():
    """Build and parse the command-line arguments for one training job.

    Returns:
        The parsed argparse Namespace, extended with derived fields:
          - world_size: equals train_nprocess (number of DDP ranks).
          - gpuids: rewritten to [0 .. train_nprocess-1].
          - epochs_per_train: shared RawArray('i') tracking finished epochs per rank.
          - batch: alias of batchsize (main_process() reads args.batch).
    """
    from argparse import ArgumentParser
    from torch.multiprocessing import RawArray

    parser = ArgumentParser()

    # traning args-----------
    parser.add_argument('-train_nprocess', '--train_nprocess', default=2, type=int, help='total training processes')
    parser.add_argument('-calc_nprocess', '--calc_nprocess', default=18, type=int, help='total processes for computeing rna to graphs')

    parser.add_argument('-epochs', '--epochs', default=10, type=int, metavar='N', help='number of total epochs to run')
    parser.add_argument('-smb', '--smb', default=100, type=int, help='the batches of saving model during training.')
    parser.add_argument('-gama', '--gama', default=0.5, type=float, help='delay weight of learning rate.')
    parser.add_argument('-eds', '--eds', default=4, type=int, help='epochs of delaying learning rate if loss value is not down.')
    parser.add_argument('-batchsize', '--batchsize', default=64, type=int, help="batchsize per gpu.")
    parser.add_argument('-lr_start', '--lr_start', default=0.000005, type=float, help='learning rate to start.')

    parser.add_argument('--GNNmodel_path', default=GNNmodel_path, type=str, help='path of saving GNN models.')
    # NOTE: default is replaced after parsing; `type=RawArray` was dropped because
    # argparse's `type` must be a str->value converter, which RawArray is not.
    parser.add_argument('--epochs_per_train', default=RawArray('i', [10]), help='finished epoch per train process')
    parser.add_argument('--gpuids', default=[0], type=list, help='gpu ids in machine. its number must be the same with train_nprocess')

    args = parser.parse_args()
    args.world_size = args.train_nprocess
    args.gpuids = list(range(args.train_nprocess))
    args.epochs_per_train = RawArray('i', [args.epochs] * args.train_nprocess)
    # BUG FIX: main_process() reads args.batch, which was never defined and
    # raised AttributeError at runtime; expose batchsize under that name too.
    args.batch = args.batchsize

    return args

# Parse CLI options once at import; `args` is read by alloc() and main_process().
args = register_task_and_traning_parameters()

def _write_name_list(filepath, names):
    """Write *names* to *filepath*, one per line, with no trailing newline."""
    with open(filepath, "w") as f:
        f.write("\n".join(names))

def _remove_alloc_files(path_RNAname):
    """Delete every per-process '<i>train.txt' allocation file in the directory."""
    import os
    for fname in os.listdir(path_RNAname):
        if "train.txt" in fname:
            os.remove(path_RNAname + fname)

def _collect_models(rna_names, per_rna_print=False):
    """Scan data_path for the pdb models of each RNA and count their index lines.

    Returns (model_names, n_envs): model_names are 'rna/model' strings; n_envs is
    the total line count over the matching index files ('<m>.txt', falling back
    to '<m>_s0.txt'). A model whose index file is missing is reported and not
    counted, but its name is still kept (matching the original renew behaviour).
    """
    import os
    model_names = []
    n_envs = 0
    for rna in rna_names:
        nlines = 0
        for m in os.listdir(data_path + rna + "/"):
            m = m.replace(".pdb", "")
            model_names.append(rna + "/" + m)
            try:
                f = open(index_path + rna + "/" + m + ".txt")
            except FileNotFoundError:
                try:
                    f = open(index_path + rna + "/" + m + "_s0.txt")
                except FileNotFoundError:
                    print(m, ' in  ENVS_NAME not in pdbfiles, skip.')
                    continue
            with f:
                nlines = len(f.readlines())
            n_envs += nlines
        if per_rna_print:
            print(rna, nlines)
    return model_names, n_envs

def _distribute_models(path_RNAname, model_names, nprocess):
    """Round-robin the model names into nprocess '<i>train.txt' files, one per line."""
    files = [open(path_RNAname + str(i) + "train.txt", "w") for i in range(nprocess)]
    try:
        for i, name in enumerate(model_names):
            files[i % nprocess].write(name + "\n")
    finally:
        for fh in files:
            fh.close()

def alloc(train_RNAS=None, test_RNAS=None, ijob=0, nprocess=24, renew=False, retest=False, re_alloc=False):
    """Allocate RNA training/testing work to the calc processes of job *ijob*.

    Args:
        train_RNAS: RNA names for training (renew mode only).
        test_RNAS: RNA names for testing (renew mode only).
        ijob: job serial number; selects the out<ijob>/ subdirectory.
        nprocess: number of '<i>train.txt' allocation files to produce.
        renew: rebuild the whole per-job directory tree and split files.
        retest: if truthy, rewrite testRNAS.txt with up to `retest` RNAs that are
            not in trainRNAS.txt (ignored when renew is True).
        re_alloc: redistribute the existing trainRNAS.txt over nprocess files
            (may run in addition to renew/retest).
    """
    import os
    import random
    from random import shuffle

    path_outi = out_path + "out%s/" % ijob
    path_RNAname = path_outi + "RNA_PART/"
    RNAS = os.listdir(index_path)
    random.seed(114514)  # fixed seed -> reproducible shuffles/splits
    shuffle(RNAS)

    if renew:
        # (Re)create the whole per-job output tree.
        for d in (path_outi,
                  path_outi + "GNN_model/",
                  path_RNAname,
                  path_outi + "test_out/",
                  path_outi + "GNN_model_test/"):
            os.makedirs(d, exist_ok=True)

        if train_RNAS:
            _write_name_list(path_RNAname + "trainRNAS.txt", train_RNAS)
            _remove_alloc_files(path_RNAname)
            trainRNA_models, n_train_env = _collect_models(train_RNAS)
            shuffle(trainRNA_models)
            _distribute_models(path_RNAname, trainRNA_models, nprocess)
        if test_RNAS:
            _write_name_list(path_RNAname + "testRNAS.txt", test_RNAS)
        try:
            print("allocate, train_RNAmodels:%s envs:%s, test_RNAS:%s, return"%(len(trainRNA_models), n_train_env, len(test_RNAS)))
        except (NameError, TypeError):
            # train_RNAS and/or test_RNAS was not supplied, so the stats are unavailable.
            print("has allocated, return")
    elif retest:
        with open(path_RNAname + "trainRNAS.txt") as f:
            train_RNAS = [x.strip() for x in f.readlines()]
        print(train_RNAS)
        # Pick up to `retest` held-out RNAs; loop shape preserved so the file's
        # newline layout matches the original exactly.
        with open(path_RNAname + "testRNAS.txt", "w") as f:
            cn = 0
            for rna in RNAS:
                if rna not in train_RNAS:
                    f.write(rna)
                    cn += 1
                    if cn == retest:
                        break
                    f.write("\n")
    if re_alloc:
        _remove_alloc_files(path_RNAname)
        try:
            f = open(path_RNAname + "trainRNAS.txt")
        except FileNotFoundError:
            print("no trainRNAS.txt file, return")
            return
        with f:
            train_RNAS = [line.strip() for line in f.readlines()]
        print("train_RNAS:", len(train_RNAS))
        # BUG FIX: the original added nlines twice per model here, inflating
        # n_train_env; each index file is now counted once (as in renew mode).
        trainRNA_models, n_train_env = _collect_models(train_RNAS, per_rna_print=True)
        shuffle(trainRNA_models)
        _distribute_models(path_RNAname, trainRNA_models, nprocess)
        print("realloc train RNAS finished. %srna, %senvs, %snprocess"%(len(train_RNAS), n_train_env, nprocess))

def get_modelindex_lr_lossf_model(GNNmodel_path, rank): 
    """Build (or resume) the GNN model for this job.

    Loads the most recent checkpoint in *GNNmodel_path* (if any), recovering
    the next model index and the learning rate encoded in the filename
    'model<i>_<epoch>_<batch>_<loss>_<lr>.pkl'.

    Args:
        GNNmodel_path: directory holding checkpoint .pkl files.
        rank: DDP rank; only rank 0 prints progress.

    Returns:
        (model_index, lr, lossf, GNN_model)
    """
    from torch import nn, load
    from GNN_NET import Net, Net1, Net2, Net3, Net4, Net5, Net6
    import os
    import torch

    Nets = [Net, Net1, Net2, Net3, Net4, Net5, Net6]
    lr = 0.0005           # default learning rate (overridden from checkpoint name below)
    lossf = nn.MSELoss()  # loss function

    torch.manual_seed(0)  # identical init on every rank before DDP wraps the model
    GNN_model = Nets[ijob](edge_dim=edge_dim)

    def _checkpoint_key(fname):
        # Numeric (model_index, epoch, batch) so 'model10...' sorts after 'model2...'.
        parts = fname.split("_")
        try:
            return (int(parts[0][5:]), int(parts[1]), int(parts[2]))
        except (ValueError, IndexError):
            return (-1, -1, -1)

    # BUG FIX: os.listdir order is arbitrary, so "last" was nondeterministic;
    # sort numerically so the newest checkpoint really is old_models[-1].
    old_models = sorted(os.listdir(GNNmodel_path), key=_checkpoint_key)
    if old_models:
        state = load(GNNmodel_path + old_models[-1])
        try:
            GNN_model.load_state_dict(state)
        except RuntimeError:
            # Checkpoint was saved from a DistributedDataParallel wrapper: strip the prefix.
            GNN_model.load_state_dict({k.replace("module.", ""): v for k, v in state.items()})
        names = old_models[-1].split(sep="_")
        model_index = int(names[0][5:]) + 1
        lr = float(names[-1].replace(".pkl", "")) if len(names) > 4 else 0.0012

        if rank == 0:
            print("Load last training model %s, lr:%s"%(old_models[-1], lr))
    else:
        if rank == 0:
            print("create a new model")
        model_index = 0

    # Count network parameters.
    nparas = sum(p.nelement() for p in GNN_model.parameters())

    if rank == 0:
        print("init success, num_parameters:%d %d lossfunction:MSELoss,  optimzer:Adam, lr=%s"%(len(list(GNN_model.parameters())), nparas, lr))
    return model_index, lr, lossf, GNN_model

def train(rank, args, starts, ends, edge_datas, node_datas, rmss, KNs, status_codes, epoch_flags, n_batchs):   
    """One DDP training rank, spawned by mp.spawn() in main_process().

    Polls the shared buffers filled by the C++ calc processes
    (status_codes[j].value == 1 means slot j holds a fresh batch), rebuilds a
    batched DGL graph on this rank's GPU, and runs Adam/MSELoss updates.

    Args:
        rank: index of this training process; its GPU is args.gpuids[rank].
        args: Namespace from register_task_and_traning_parameters().
        starts, ends, node_datas, edge_datas, rmss, KNs: per-calc-process shared
            RawArrays holding graph edges, node/edge features, labels and
            per-graph atom counts.
        status_codes, epoch_flags, n_batchs: per-calc-process shared RawValues
            forming the producer/consumer handshake.
    """
    from torch import nn, tensor, optim, save, reshape, float32, device
    from dgl import graph
    import numpy as np
    from itertools import cycle
    from time import time
    from GNN_NET import onehot_node
    import torch.distributed as dist
    from os import environ, getpid
    #################################
    #init process group------------
    gpu=args.gpuids[rank]
    environ['MASTER_ADDR'] = 'localhost'     # single-machine setup
    # NOTE(review): hard-coded rendezvous port; may collide with a concurrent job.
    environ['MASTER_PORT'] = '4236'
    print('%s train process %s start init process group --------'%(getpid(), rank))
    dist.init_process_group(
        backend='nccl',
        init_method='env://',
        world_size=args.world_size,
        rank=rank
    )
    print('%s train process %s  init succes! --------'%(getpid(), rank))
    #################################
    #init training--------------
    # Number of calc-process data slots consumed by each training rank.
    per_train_data=int(args.calc_nprocess/args.train_nprocess)
    my_device=device("cuda:%s"%gpu)
    model_index, lr, lossf, GNN_model=get_modelindex_lr_lossf_model(args.GNNmodel_path, rank)
    GNN_model=GNN_model.to(my_device)
    #wrap the model
    GNN_model = nn.parallel.DistributedDataParallel(GNN_model, device_ids=[gpu])
    
    # NOTE(review): this overrides the lr recovered from the checkpoint filename
    # in get_modelindex_lr_lossf_model — confirm that is intended on resume.
    lr=args.lr_start                           #leraning rate
    optimzer = optim.Adam(GNN_model.parameters(), betas=[0.9, 0.98], lr=lr)
    #################################
    #start training------------------
    nbatch=0
    nepoch=0
    train_loss=[]    # per-epoch average loss history, used for the lr-decay check
    while True:
        total_loss=0.
        total_samples=0
        #f=open(path_loss, "w")
        finished=0
        
        # Busy-wait over this rank's slots [per_train_data*rank, per_train_data*(rank+1)).
        for j in cycle(range(per_train_data * rank, per_train_data * (rank+1) ) ):
            if status_codes[j].value==1:
                print("rank_train %s rank_data %s"%(rank, j))
                #print("rank %s finished %s"%(rank, finished))
                #print("rank %s calc %s"%(rank, j))
                time0=time()
                #----copy data from shared space-------
                n_batch=n_batchs[j].value
                Natoms_part=KNs[j][:n_batch]       # atoms per graph in this batch
                Natoms=np.sum(Natoms_part)         # total atoms across the batch

                b_start=np.array(starts[j][:Natoms*n_neibors])
                b_end=np.array(ends[j][:Natoms*n_neibors])
                b_node=np.array(node_datas[j][:Natoms*node_dim])
                b_edge=np.array(edge_datas[j][:Natoms*n_neibors*edge_dim])
                b_rms=rmss[j][:n_batch]
                
                status_codes[j].value=0  # release the slot so the producer can refill it
                
                """
                print("n_batch, b_start, b_end, b_node, b_edge, b_rms", n_batch, len(b_start), len(b_end), len(b_node), len(b_edge), len(b_rms))
                print("b_start", b_start)
                print("b_end", b_end)
                print("b_node", b_node)
                """
                #print("start, end, node, edge, label", len(b_start), len(b_end), len(b_node), b_edge.shape, len(b_rms))
                
                #----creating data to gpu------  creating a big graph including batchsize graphs.
                Gs=graph((b_start, b_end), device=my_device)
                Gs.ndata["n1"]=onehot_node(tensor(b_node), onehots).to(my_device)
                Gs.edata["e1"]=reshape(tensor(b_edge, dtype=float32, device=my_device), (Natoms*n_neibors, edge_dim))
                batch_label_gpu=tensor(b_rms, device=my_device,  dtype=float32)               
                
                time1=time()
                pre_label=GNN_model(Gs, Natoms_part).squeeze_()
                
                #print("pre:", pre_label)
                #print("label:", batch_label_gpu)
                
                Loss=lossf(pre_label, batch_label_gpu)
                Loss.backward()
                optimzer.step()
                optimzer.zero_grad()

                nbatch+=1
                total_loss+=n_batch*float(Loss)    # sample-weighted running loss
                total_samples+=n_batch
                loss_avg=total_loss/total_samples

                time2=time()
                dt1=time1-time0    # time spent copying + building the GPU batch
                dt2=time2-time1    # time spent in forward/backward/step
                if rank==0:
                    msg="epoch %s rank %s loss_avg:%.5f  %.4f  %.4f %.6f"%(nepoch, rank, loss_avg, dt1, dt2,  optimzer.param_groups[0]["lr"])
                    print(msg)
                    if nbatch%args.smb==0:
                        save(GNN_model.state_dict(), args.GNNmodel_path+"model%d_%d_%d_%.4f_%.6f.pkl"%(model_index, nepoch, nbatch, loss_avg, optimzer.param_groups[0]["lr"]))
                # A producer sets epoch_flags[j] when its file list is exhausted;
                # the epoch ends once every slot of this rank has flagged.
                if epoch_flags[j].value==1:
                    finished+=1
                    #print("rank %s test j epoch_flag to 1 finished %s"%(rank, finished))
                if finished == per_train_data:
                    break
                """
                msg="%d  part_loss:%.4f loss_avg:%.4f  lr:%s  pre_data spend:%.2fs, train spend:%.2fs"%(nbatch, Loss.item(), loss_avg, str(lr), dt1, dt2)
                print(msg)
                f.write(str(msg)+"\n")
                """
                """
                if nepoch%add_T==0:
                    for pg in optimzer.param_groups:
                        lr=lr+add
                        pg["lr"]=lr
                del Gs
                del pre_label
                del Loss
                del batch_label_gpu
                """
        nepoch+=1
        args.epochs_per_train[rank]=nepoch    # shared RawArray: visible to all ranks
        msg="rank %s epoch %d finished---------"%(rank, nepoch)
        print(msg)
        #-------------if loss not down, delay-------------
        train_loss.append(loss_avg)
        if len(train_loss)>=args.eds+1:
            now_loss=train_loss[-1]
            # NOTE(review): train_loss[len(train_loss)-args.eds] is the loss from
            # args.eds-1 epochs ago (equivalent to train_loss[-args.eds]); confirm
            # whether args.eds epochs ago was intended.
            if now_loss>=train_loss[len(train_loss)-args.eds]:
                lr=lr*args.gama
                for pg in optimzer.param_groups:
                    pg["lr"]=lr 
                        
        #--------------save model--------------------
        if rank==0:
            save(GNN_model.state_dict(), args.GNNmodel_path+"model%d_%d_%d_%.4f_%.4g.pkl"%(model_index, nepoch, nbatch, loss_avg, optimzer.param_groups[0]["lr"]))
        
        #----------change lr from lr file---------------
        # Manual override: if lr_path exists and parses as a float, adopt it.
        try:
            f=open(lr_path)
            new_lr=float(f.read().strip())
            f.close()
            print("change lr %.6f to %.6f from lr file."%(lr, new_lr))
            lr=new_lr
            for pg in optimzer.param_groups:
                pg["lr"]=lr  
        except:
            pass
            
        # Keep looping until every rank has completed args.epochs epochs.
        if min(args.epochs_per_train)<args.epochs:     
            for i in range(per_train_data * rank, per_train_data * (rank+1)):
                epoch_flags[i].value=0
                #print("rank %s epoch_flags: %s "%(rank, str([v.value for v in epoch_flags])))
            continue
        else:
            # NOTE(review): exit() raises SystemExit in this spawned worker only.
            exit('all ranks finished  %s epochs training. epochs per train process %s. exit from process %s'%(args.epochs, args.epochs_per_train, rank))       

def main_process():
    """Launch the full pipeline: C++ data-producer processes plus DDP train ranks.

    One set of shared ctypes buffers per calc process is handed to the C++
    `compute_data` routine, which fills them with graph batches; the spawned
    train() ranks consume them via the status_codes/epoch_flags handshakes.
    """
    from torch.multiprocessing import Process, RawArray, RawValue
    import torch.multiprocessing as mp
    from ctypes import cdll, c_int32, c_double, c_void_p, pointer, POINTER, c_short, c_char_p, c_char
    import os
    
    ### transform string paths to c char*.
    path1 = c_char_p(data_path.encode())
    path2 = c_char_p(index_path.encode())
    path3 = c_char_p(out_path.encode())
    
    ### one calc process per '<i>train.txt' allocation file written by alloc().
    items=os.listdir(RNAList_path)
    calc_nprocess=0
    for dirname in items:
        if "train.txt" in dirname:
            calc_nprocess+=1      
    print('ijob:%s, calc process:%s train_process:%s'%(ijob, calc_nprocess, args.train_nprocess))
    
    #compile pro.cpp-------------------
    if os.path.exists(src_path+'pro.so'):
        print('pro.so is already. try to use it. Please ensure it is the latest version.')
    else:
        # BUG FIX: os.system never raises on compiler failure, so the previous
        # try/except was dead code; check the exit status instead.
        ret = os.system("g++ %spro.cpp -o %spro.so -shared -fPIC"%(src_path, src_path))  ### it needs gcc 7.0+ .
        if ret != 0:
            print("pro.cpp needs g++ version is 7.0+. please install gcc compiler (like mingw).")
            print("or you can run 'g++ pro.cpp -o pro.so -fPIC -shared' in src_path with gcc version that is higer than 7.0.")
            exit(-1)
    
    mylib=cdll.LoadLibrary(src_path+"pro.so")
    # BUG FIX: the ctypes attribute is `argtypes`; the original `argstype` typo
    # meant no argument checking/conversion was ever registered.
    mylib.compute_data.argtypes=[c_char_p                # data_path
                                 ,c_char_p               # index_path
                                 ,c_char_p               # out_path
                                 ,c_short                # calc process id
                                 ,c_short                # ijob
                                 ,c_short                # batchsize
                                 ,c_short                # n_neibors
                                 ,c_short                # node_dim
                                 ,c_short                # edge_dim
                                 ,POINTER(c_int32)       # starts
                                 ,POINTER(c_int32)       # ends
                                 ,POINTER(c_int32)       # status_code
                                 ,POINTER(c_int32)       # KNs
                                 ,POINTER(c_double)      # edge_data
                                 ,POINTER(c_int32)       # node_data
                                 ,POINTER(c_double)      # rms labels
                                 ,POINTER(c_int32)       # epoch_flag
                                 ,POINTER(c_int32)]      # n_batch
    
    mylib.compute_data.restype=c_void_p
    print("c++ Module compile success!")
    
    #------creating shared_datas--------
    # BUG FIX: the parser defines `batchsize`; `args.batch` did not exist and
    # raised AttributeError here.
    starts, ends, edge_datas, node_datas, rmss, KNs, status_codes, epoch_flags, n_batchs=[], [], [], [], [], [], [], [], []
    for i in range(calc_nprocess):
        starts.append(RawArray(c_int32, args.batchsize*max_Natoms*n_neibors))
        ends.append(RawArray(c_int32, args.batchsize*max_Natoms*n_neibors))
        edge_datas.append(RawArray(c_double, args.batchsize*max_Natoms*n_neibors*edge_dim))
        node_datas.append(RawArray(c_int32, args.batchsize*max_Natoms*node_dim))
        rmss.append(RawArray(c_double, args.batchsize))
        KNs.append(RawArray(c_int32, args.batchsize))
        status_codes.append(RawValue(c_int32, 0))
        n_batchs.append(RawValue(c_int32, args.batchsize))
        epoch_flags.append(RawValue(c_int32, 0))
    print('created share space')
    
    #------creating and run compute processes and train processes group
    calc_processes=[]
    for i in range(calc_nprocess):
        P=Process(target=mylib.compute_data, args=(path1
                                                   ,path2
                                                   ,path3
                                                   ,i
                                                   ,ijob
                                                   ,args.batchsize
                                                   ,n_neibors
                                                   ,node_dim
                                                   ,edge_dim
                                                   ,starts[i]
                                                   ,ends[i]
                                                   ,pointer(status_codes[i])
                                                   ,KNs[i]
                                                   ,edge_datas[i]
                                                   ,node_datas[i]
                                                   ,rmss[i]
                                                   ,pointer(epoch_flags[i])
                                                   ,pointer(n_batchs[i])))
        calc_processes.append(P)
        
    print("main_process intit sucess, start %s compute Processes(c++), %s train Procsess"%(calc_nprocess, args.train_nprocess))
    
    ### start calc processes
    for p in calc_processes:
        p.start()
    
    ### start training processes group with 'spawn' method.
    mp.spawn(train, nprocs = args.train_nprocess, args = (args
                                                          ,starts
                                                          ,ends
                                                          ,edge_datas
                                                          ,node_datas
                                                          ,rmss
                                                          ,KNs
                                                          ,status_codes
                                                          ,epoch_flags
                                                          ,n_batchs)
                                                          ,join=True
                                                          ,start_method='spawn'
                                                          ) 
    
if __name__=="__main__":
    
    ### alloc task to each cacl processes and training processes.
    ### re_alloc=True: redistribute the existing trainRNAS.txt over
    ### args.calc_nprocess per-process allocation files.
    alloc(train_RNAS=None
          ,test_RNAS=None
          ,ijob=ijob
          ,nprocess=args.calc_nprocess
          ,renew=False
          ,retest=False
          ,re_alloc=True)
    
    ### training
    main_process()


        
    
    

    

     
