import os
import shutil
seed=6
import random
random.seed(seed)# seeding here, before anything else runs, applies to every subroutine this script calls
import numpy as np
np.set_printoptions(suppress=True)
np.random.seed(seed)
os.environ['PYTHONHASHSEED']=str(seed)
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus):# when running on GPU, cap device memory and point XLA at the CUDA toolkit
    # tf.config.experimental.set_memory_growth(gpus[0],True)# alternative: grow memory on demand, release on exit
    tf.config.experimental.set_virtual_device_configuration(gpus[0],[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=6144)])
    CUDAPATH=os.environ["CUDA_PATH"]
    CUDAPATH=CUDAPATH.replace('\\','/')
    allstr="--xla_gpu_cuda_data_dir='"+CUDAPATH+"'"
    os.environ["XLA_FLAGS"] =allstr
tf.random.set_seed(seed)
import modelzoo
# In[ ]: physical constants
R0=6378135;# Earth radius [m]
g0= 9.80665;# standard gravity [m/s^2]
DTR=np.pi/180;# degrees -> radians
RTD=180/np.pi;# radians -> degrees

class MyLRSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warm-up followed by a power-law decay.

    For ``step <= midstep`` the LR ramps linearly from
    ``initial_learning_rate`` to ``max_learning_rate``; afterwards it decays
    as ``2*max_lr / ((step - midstep)**rpower + 1)``.
    """
    def __init__(self, initial_learning_rate,midstep,max_learning_rate,rpower):
        self.initial_learning_rate = initial_learning_rate  # warm-up starting LR
        self.max_learning_rate = max_learning_rate          # LR reached at the end of warm-up
        self.midstep = tf.cast(midstep,tf.float32)          # number of warm-up steps
        self.rpower = rpower                                # decay exponent
    def __call__(self, step):
        step = tf.cast(step,tf.float32)
        # Post-warm-up branch: power-law decay anchored at 2*max_lr.
        decayed = 2.0*self.max_learning_rate/(tf.maximum(step-self.midstep,0.0)**self.rpower+1)
        # Warm-up branch: linear ramp from the initial to the maximum LR.
        warmup = self.initial_learning_rate+(self.max_learning_rate-self.initial_learning_rate)*(step+1)/(self.midstep+1)
        return tf.where(tf.greater(step,self.midstep),decayed,warmup)

class MyEXPCchedule(tf.keras.optimizers.schedules.LearningRateSchedule):# cyclically repeating exponential learning rate
    """Sawtooth exponential schedule with an initial linear warm-up.

    After ``midstep`` warm-up steps, every ``incycle`` optimizer steps advance
    one stage; within each cycle of ``cyclestep`` stages the LR sweeps
    exponentially from ``initial_learning_rate`` up to ``max_learning_rate``,
    then wraps around.
    """
    def __init__(self, initial_learning_rate,cyclestep,max_learning_rate,incycle=1,midstep=0):
        self.initial_learning_rate = initial_learning_rate# LR at the start of each sweep
        self.max_learning_rate = max_learning_rate# LR at the end of each sweep
        self.cyclestep = tf.cast(cyclestep,tf.float32)# stages per sweep cycle
        self.incycle = tf.cast(incycle,tf.float32)# optimizer steps per stage
        self.midstep = tf.cast(midstep,tf.float32)# warm-up steps (linear ramp from near zero to max)
        self.min_learning_rate=1e-8;# warm-up starting LR
    def __call__(self, step):
        step=tf.cast(step,tf.float32);
        # Whole stages completed since warm-up ended.
        stepcc=tf.floor(tf.maximum(step-self.midstep,0.0)/self.incycle);
        # First term (step >= midstep): exponential sweep, stage index wrapped modulo (cyclestep+1).
        # Second term (step < midstep): linear ramp from min_learning_rate to max_learning_rate.
        lr=tf.cast(tf.greater_equal(step,self.midstep),tf.float32)*tf.pow(self.max_learning_rate/self.initial_learning_rate,(stepcc-tf.floor(stepcc/(self.cyclestep+1.0))*(self.cyclestep+1.0))/self.cyclestep)*self.initial_learning_rate+tf.cast(tf.less(step,self.midstep),tf.float32)*(self.min_learning_rate+(self.max_learning_rate-self.min_learning_rate)*(step+1)/(self.midstep+1));
        return lr
class MyEXPchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Monotonic exponential decay with an optional linear warm-up.

    After ``midstep`` warm-up steps the LR starts at ``max_learning_rate``
    and decays by a constant per-stage factor, reaching
    ``initial_learning_rate`` after ``cyclestep`` stages (one stage =
    ``incycle`` optimizer steps).
    """
    def __init__(self, initial_learning_rate,cyclestep,max_learning_rate,incycle=1,midstep=0):
        self.initial_learning_rate = initial_learning_rate  # low end of the decay
        self.max_learning_rate = max_learning_rate          # high end of the decay
        self.cyclestep = tf.cast(cyclestep,tf.float32)      # stages needed to reach the low end
        self.incycle = tf.cast(incycle,tf.float32)          # optimizer steps per stage
        self.midstep = tf.cast(midstep,tf.float32)          # warm-up steps
        self.min_learning_rate = 1e-8                       # warm-up starting LR
    def __call__(self, step):
        step=tf.cast(step,tf.float32)
        # Whole stages completed since warm-up ended.
        stage=tf.floor(tf.maximum(step-self.midstep,0.0)/self.incycle)
        # Post-warm-up branch: geometric decay from max toward initial LR.
        decayed=tf.pow(self.initial_learning_rate/self.max_learning_rate,stage/self.cyclestep)*self.max_learning_rate
        # Warm-up branch: linear ramp from min_learning_rate up to max_learning_rate.
        rampup=self.min_learning_rate+(self.max_learning_rate-self.min_learning_rate)*(step+1)/(self.midstep+1)
        return tf.where(tf.greater_equal(step,self.midstep),decayed,rampup)
class MyCOSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Cyclic cosine-annealed learning rate with an initial linear warm-up."""
    def __init__(self, initial_learning_rate,cyclestep,max_learning_rate,incycle=1,midstep=0):
        self.initial_learning_rate = initial_learning_rate# low end of the cosine sweep
        self.max_learning_rate = max_learning_rate# high end of the cosine sweep
        self.cyclestep = tf.cast(cyclestep,tf.float32)# stages per cosine cycle
        self.incycle = tf.cast(incycle,tf.float32)# optimizer steps per stage
        self.midstep = tf.cast(midstep,tf.float32)# warm-up steps (linear ramp up to max)
        self.min_learning_rate=1e-8;# warm-up starting LR
        self.PI = tf.constant(3.141592653589793);
    def __call__(self, step):
        step=tf.cast(step,tf.float32);
        # Whole stages completed since warm-up ended.
        stepcc=tf.floor(tf.maximum(step-self.midstep,0.0)/self.incycle);
        # First term (step >= midstep): quarter-cosine sweep from max down to initial LR,
        # stage index wrapped modulo (cyclestep+1); second term (step < midstep): linear warm-up ramp.
        lr=tf.cast(tf.greater_equal(step,self.midstep),tf.float32)*(tf.cos(0.5*self.PI*(stepcc-tf.floor(stepcc/(self.cyclestep+1.0))*(self.cyclestep+1.0))/self.cyclestep)*(self.max_learning_rate-self.initial_learning_rate)+self.initial_learning_rate)+tf.cast(tf.less(step,self.midstep),tf.float32)*(self.min_learning_rate+(self.max_learning_rate-self.min_learning_rate)*(step+1)/(self.midstep+1));
        return lr
class MySDchedule(tf.keras.optimizers.schedules.LearningRateSchedule):# sine-shaped cycles whose peak decays over time
    """Sinusoidal cyclic learning rate whose peak decays geometrically.

    After the linear warm-up, the LR follows a sine arch per cycle between
    ``initial_learning_rate`` and a peak that shrinks by factor ``rpr`` per
    ``cyclestep`` stages.
    """
    def __init__(self, initial_learning_rate,cyclestep,max_learning_rate,incycle=1,midstep=0,rpr=0):
        self.initial_learning_rate = initial_learning_rate# low end of the sine arch
        self.max_learning_rate = max_learning_rate# initial peak of the sine arch
        self.cyclestep = tf.cast(cyclestep,tf.float32)# stages per cycle
        self.incycle = tf.cast(incycle,tf.float32)# optimizer steps per stage
        self.midstep = tf.cast(midstep,tf.float32)# warm-up steps (linear ramp)
        self.min_learning_rate=1e-8;# warm-up starting LR
        self.PI = tf.constant(3.141592653589793);
        self.rpr=tf.cast(rpr,tf.float32)# per-cycle decay factor applied to the peak
    def __call__(self, step):
        step=tf.cast(step,tf.float32);
        # Whole stages completed since warm-up ended.
        stepcc=tf.floor(tf.maximum(step-self.midstep,0.0)/self.incycle);
        # Current (decayed) peak: geometric decay after warm-up, fixed max during warm-up.
        max_learning_rate=tf.cast(tf.greater_equal(step,self.midstep),tf.float32)*tf.pow(self.rpr,stepcc/self.cyclestep)*self.max_learning_rate+tf.cast(tf.less(step,self.midstep),tf.float32)*(self.max_learning_rate);
        # First term (step >= midstep): sine arch between initial LR and the decayed peak;
        # second term (step < midstep): linear ramp from min_learning_rate to initial_learning_rate.
        lr=tf.cast(tf.greater_equal(step,self.midstep),tf.float32)*(tf.sin(self.PI*(stepcc/(self.cyclestep+1.0)-tf.floor(stepcc/(self.cyclestep+1.0))))*(max_learning_rate-self.initial_learning_rate)+self.initial_learning_rate)+tf.cast(tf.less(step,self.midstep),tf.float32)*(self.min_learning_rate+(self.initial_learning_rate-self.min_learning_rate)*(step+1)/(self.midstep+1));
        return lr
# In[ ]: sample-generation settings
timemaxmin=np.array([5000,200]);# maximum / minimum flight time
tranum=40000;# test-set size
trainnum=1000000;# training-set size
rpnum=2;# in the training set, samples generated per random initial state with different profile parameters
sampsign=False;# whether to regenerate the samples
modelload=False;# whether to load a previously saved network instead of training
loadpoch=190000;# epoch tag of the checkpoint to load
obnum=3;# number of initial-state variables
actnum=9;# number of trajectory parameters
valuenum=10;# number of terminal-constraint quantities, including time
# In[ ]: DLL initialization
if sampsign:
    # Copy the dynamics DLL and its companion data files from the parent directory.
    alllist=['REENTRY.dll','REENTRY.exp','REENTRY.lib','REENTRY.pdb','x.txt','y.txt','z.txt','w.txt','cdvalue.txt','clvalue.txt'];
    path=os.getcwd();
    parent = os.path.abspath(os.path.join(path, os.pardir));
    for filename in alllist:
        newpath=path+'\\'+filename;
        oldpath=parent+'\\'+filename;
        shutil.copyfile(oldpath,newpath);

np.set_printoptions(suppress=True)

import ctypes
lib = ctypes.cdll.LoadLibrary(".\\REENTRY.dll")  
# Declare the argument types of the DLL functions.
lib.Agent_stepForward.argtypes = (ctypes.POINTER(ctypes.c_double),)
lib.State_set.argtypes = (ctypes.POINTER(ctypes.c_double),)
lib.atreat.argtypes = (ctypes.c_bool,)
lib.amin_reset.argtypes = (ctypes.POINTER(ctypes.c_double),)
lib.amax_reset.argtypes = (ctypes.POINTER(ctypes.c_double),)
# Declare the return types of the DLL functions.
# POINTER returns refer to DLL-owned arrays: always copy after converting to numpy,
# otherwise the values change when the DLL runs again.
lib.Agent_stepForward.restype = ctypes.POINTER(ctypes.c_double)
lib.State_set.restype = ctypes.POINTER(ctypes.c_double)        
acttreatsign=True;# out-of-range action handling: False = hard clipping, True = map into range via a periodic function
maxang=np.array([1,1],dtype=np.float32);# action upper bounds
minang=np.array([-1,-1],dtype=np.float32);# action lower bounds

lib.atreat(acttreatsign)
lib.amax_reset(np.ctypeslib.as_ctypes(maxang.astype(np.float64)))
lib.amin_reset(np.ctypeslib.as_ctypes(minang.astype(np.float64)))
lib.ainterval()
lib.tmax_reset(np.ctypeslib.as_ctypes(timemaxmin.astype(np.float64)));
# In[ ]: sample generation (or loading from disk)
if sampsign:
    # ---- test set: fully random initial states and profile parameters ----
    state0s=np.zeros((tranum,obnum));# initial states
    params=np.zeros((tranum,actnum));# trajectory parameters
    Fstates=np.zeros((tranum,valuenum));# terminal states
    for trano in range(tranum):
        # Random initial state: radius [m], velocity [m/s], flight-path angle [rad].
        r = (50e3 + 10e3 * np.random.rand()) + R0;
        V = (5.0e3 + 0.5e3 * np.random.rand());
        gamma = -(2 * np.random.rand()) * DTR;
        state0=np.array([r,V,gamma]);
        state0s[trano,:]=state0;
        lib.State_set(np.ctypeslib.as_ctypes(state0.astype(np.float64)));
        
        # Random trajectory-profile parameters (drag/lift ratios, terminal velocity, bank-reversal time).
        dratio0 = np.random.rand();
        dratio_mid = np.random.rand();
        dratiof = np.random.rand();
        vratio = np.random.rand();
        ldratio0 = np.random.rand();
        ldratio_mid = np.random.rand();
        ldratiof = np.random.rand();
        Vf = 1500+1000*np.random.rand();
        reversetime=-500+1000*np.random.rand();
        param=np.array([dratio0,dratio_mid,dratiof,vratio,ldratio0,ldratio_mid,ldratiof,Vf,reversetime]);
        params[trano,:]=param;
        # Propagate the trajectory in the DLL; copy the returned terminal state (see DLL note above).
        Fstate=np.ctypeslib.as_array(lib.Agent_stepForward(np.ctypeslib.as_ctypes(param.astype(np.float64))), shape=[valuenum]).astype(np.float32);
        Fstates[trano,:]=Fstate;
        
    alldatax=(np.concatenate((state0s,params),axis=-1)).astype(np.float32);# test-set inputs
    alldatay=(Fstates).astype(np.float32);# test-set outputs

    # ---- training set: each initial state is reused for rpnum parameter draws ----
    state0s=np.zeros((trainnum,obnum));# initial states
    params=np.zeros((trainnum,actnum));# trajectory parameters
    Fstates=np.zeros((trainnum,valuenum));# terminal states
    for trano in range(trainnum):
        if trano%rpnum==0:# draw a fresh initial state every rpnum samples
            r = (50e3 + 10e3 * np.random.rand()) + R0;
            V = (5.0e3 + 0.5e3 * np.random.rand());
            gamma = -(2 * np.random.rand()) * DTR;
        state0=np.array([r,V,gamma]);
        state0s[trano,:]=state0;
        lib.State_set(np.ctypeslib.as_ctypes(state0.astype(np.float64)));
        
        dratio0 = np.random.rand();
        dratio_mid = np.random.rand();
        dratiof = np.random.rand();
        vratio = np.random.rand();
        ldratio0 = np.random.rand();
        ldratio_mid = np.random.rand();
        ldratiof = np.random.rand();
        Vf = 1500+1000*np.random.rand();
        reversetime=-500+1000*np.random.rand();
        param=np.array([dratio0,dratio_mid,dratiof,vratio,ldratio0,ldratio_mid,ldratiof,Vf,reversetime]);
        params[trano,:]=param;
        Fstate=np.ctypeslib.as_array(lib.Agent_stepForward(np.ctypeslib.as_ctypes(param.astype(np.float64))), shape=[valuenum]).astype(np.float32);
        Fstates[trano,:]=Fstate;
        print(trano)
            
    datax=(np.concatenate((state0s,params),axis=-1)).astype(np.float32);# training-set inputs
    datay=(Fstates).astype(np.float32);# training-set outputs
    
    # Persist all four arrays; variable names double as file names (via eval on this fixed list).
    for s in ['datax','datay','alldatax','alldatay']:    
        np.savetxt(s+'.txt',np.array(eval(s)),fmt='%.5g')
else:
    # Load previously generated samples; exec/eval here only touch this hard-coded name list.
    for s in ['datax','datay','alldatax','alldatay']:    
        exec(s+'=(np.loadtxt(\''+s+'.txt\''+',ndmin=2'+')).astype(np.float32)')
# Append the tail of the training set to the test set, then truncate the training set.
alldatax=np.concatenate((alldatax,datax[-10000:]),axis=0); 
alldatay=np.concatenate((alldatay,datay[-10000:]),axis=0); 
datax=datax[:200000];
datay=datay[:200000];   
tranum=alldatax.shape[0];# test-set size (after appending)
trainnum=datax.shape[0];# training-set size (after truncation)
def transtate(oristate,rtd=180/np.pi):# select the needed terminal-state components, with consistent units
    """Select the terminal-state quantities of interest from the dynamics output.

    Keeps columns 1 and 2 (angles, converted from radians via ``rtd``) and
    column 3 (unchanged).  Accepts a single state vector (1-D) or a batch of
    states (2-D, one state per row).

    Parameters
    ----------
    oristate : np.ndarray
        Raw terminal state(s) as returned by the dynamics DLL.
    rtd : float, optional
        Radians-to-degrees conversion factor.  Parameterized (instead of the
        module-level RTD constant) so the unit conversion can be overridden;
        the default 180/pi keeps the original degree output.

    Returns
    -------
    np.ndarray
        Shape (n, 3) for 2-D input, shape (3,) for 1-D input.
    """
    if oristate.ndim==2:
        fstate=np.concatenate((oristate[:,[1,2]]*rtd,oristate[:,[3]]),axis=-1)
    else:
        fstate=np.concatenate((oristate[[1,2]]*rtd,oristate[[3]]),axis=-1)
        fstate=np.squeeze(fstate)
    return fstate

datay=transtate(datay);
alldatay=transtate(alldatay);
# datay=np.concatenate((alldatay,datay),axis=0);# use all samples when training
# datax=np.concatenate((alldatax,datax),axis=0);# use all samples when training
# In[ ]: regression-network training setup
inpnum=datax.shape[-1];# network input width
outnum=datay.shape[-1];# network output width
        
modelno=569
mypredictmodel=modelzoo.predictmodel569(inpnum=inpnum,outnum=outnum);
lossstr='MeanSquaredError';

predlr=0.001;# base learning rate (whole network)
predlrpb=predlr*0.2;# learning rate for the shared-trunk pass
predlrwt=predlr*0.2;# learning rate for the per-output fine-tune pass

prebatchsize=2048# minibatch size (whole network)
prebatchsizepb=1024# minibatch size (shared trunk)
prebatchsizewt=512# minibatch size (fine-tune)
tno=4;# minibatches drawn per whole-network pass
trainbnum=prebatchsize*tno;# samples drawn from the training set each round
tnopb=tno*int(prebatchsize/prebatchsizepb);# minibatches per shared-trunk pass
trainbnumpb=min(prebatchsizepb*tnopb,trainbnum);# samples used per shared-trunk pass
tnowt=tno*int(prebatchsize/prebatchsizewt);# minibatches per fine-tune pass
trainbnumwt=min(prebatchsizewt*tnowt,trainbnum);# samples used per fine-tune pass
yxpoch=1;# whole-network passes per inner epoch
pbpoch=1;# shared-trunk passes per inner epoch
pwratio=1;# ratio of fine-tune passes to shared-trunk passes
wtpoch=pwratio*pbpoch;# per-output fine-tune passes after the shared pass

choosepoch=2;# inner epochs per freshly drawn sample batch
addpoch=(yxpoch*tno+pbpoch*tnopb+wtpoch*tnowt)*choosepoch;# optimizer steps per outer round
trainno=int(2000000/addpoch);# number of outer rounds
wlpoch=int(10000/addpoch);# evaluation period (in outer rounds)
savepoch=int(10000000/addpoch);# checkpoint period (in outer rounds)
# Cyclic learning-rate schedules (sine cycles with decaying peak), one per optimizer.
acycle=512;
midstep=512;
crishedule=MySDchedule(predlr*1e-1,acycle,predlr,incycle=tno,midstep=midstep,rpr=0.998);
crishedulepb=MySDchedule(predlrpb*1e-1,acycle,predlrpb,incycle=tnopb,midstep=midstep,rpr=0.998);
crishedulewt=MySDchedule(predlrwt*1e-1,acycle,predlrwt,incycle=tnowt,midstep=midstep,rpr=0.998);
criopt=tf.keras.optimizers.Adam(learning_rate=crishedule,global_clipnorm=55.0)#global_clipnorm=2.0#,clipvalue=2.0
crioptwt=tf.keras.optimizers.Adam(learning_rate=crishedulewt,global_clipnorm=10.0)
crioptpb=tf.keras.optimizers.Adam(learning_rate=crishedulepb,global_clipnorm=50.0)#,global_clipnorm=100.0

mypredictmodel(alldatax);# one forward pass to build the model so its weights exist
# In[ ]: split variables into per-output branch weights and shared-trunk weights;
# this must happen after one forward pass so the model has been built and has weights
criend_varlist=[];# per-output branch (fine-tune) variables
end_name=[];
for outno in range(outnum):
    for ii in range(len(mypredictmodel.wtlayers)):
        criend_varlist=criend_varlist+mypredictmodel.wtlayers[ii][outno].trainable_variables
for vv in criend_varlist:
    end_name.append(vv.name);
cripb_varlist=[];# shared-trunk variables (everything not in a branch)
for vv in mypredictmodel.trainable_variables:
    if (vv.name in end_name)==False:
        cripb_varlist.append(vv)
# In[ ]: input normalization — seed the first BatchNorm's moving statistics from the data
mmean=np.mean(datax,axis=0);
mstd=np.square(np.std(datax,axis=0));# squared std = variance, which is what moving_variance stores
for ss in range(len(mypredictmodel.bn0.weights)):
    if 'moving_mean' in mypredictmodel.bn0.weights[ss].name:
        mypredictmodel.bn0.weights[ss].assign(mmean)
        break
for ss in range(len(mypredictmodel.bn0.weights)):
    if 'moving_var' in mypredictmodel.bn0.weights[ss].name:
        mypredictmodel.bn0.weights[ss].assign(mstd)
        break
# In[ ]: shrink the initial weights
noise=1e-2;
numev=len(mypredictmodel.weights);
totratio=tf.pow(noise,tf.clip_by_value(tf.random.normal([1],0.05,0.5),0.03,0.5));# one random global shrink factor
for ss in range(numev):
    wname=mypredictmodel.weights[ss].name;
    if ('batch_normalization' in wname):
        pass
    # elif ('moving_mean' in wname)|('moving_variance' in wname):
    else:
        tshape=mypredictmodel.weights[ss].shape;
        fanin=tshape[0];
        if 'bias' in wname:
            mypredictmodel.weights[ss].assign(5e-6*tf.random.truncated_normal(tshape,0.0,tf.sqrt(1.0/fanin)));# tiny fan-in-scaled noise via the initializer
        else:
            mypredictmodel.weights[ss].assign(totratio*tf.pow(noise,tf.clip_by_value(tf.random.normal([1],0.0,0.25),0.0,0.5))*tf.random.truncated_normal(tshape,0.0,tf.sqrt(1.0/fanin)));# fan-in-scaled noise with an extra per-tensor random shrink
# In[ ]:调所有参数
@tf.function(jit_compile=True,input_signature=[tf.TensorSpec(shape=[None,inpnum], dtype=tf.float32),tf.TensorSpec(shape=[None,outnum], dtype=tf.float32)])
def critrain(ddx:tf.Tensor,ddy:tf.Tensor):
    """One training step on ALL trainable variables using mean-squared error."""
    with tf.GradientTape() as gtape:
        pred = mypredictmodel(ddx,training=True)
        loss = tf.reduce_mean(tf.square(ddy-pred))
    criopt.minimize(loss,var_list=mypredictmodel.trainable_variables,tape=gtape)
# In[ ]:调多分支参数,由于各分支互不影响,可以写在一起
@tf.function(jit_compile=True,input_signature=[tf.TensorSpec(shape=[None,inpnum], dtype=tf.float32),tf.TensorSpec(shape=[None,outnum], dtype=tf.float32)])
def critrainWT(ddx:tf.Tensor,ddy:tf.Tensor):
    """One MSE training step on the per-output branch variables only.

    The branches are mutually independent, so a single combined loss updates
    them all at once."""
    with tf.GradientTape() as gtape:
        pred = mypredictmodel(ddx,training=True)
        loss = tf.reduce_mean(tf.square(ddy-pred))
    crioptwt.minimize(loss,var_list=criend_varlist,tape=gtape)
# In[ ]:调公共部分参数
@tf.function(jit_compile=True,input_signature=[tf.TensorSpec(shape=[None,inpnum], dtype=tf.float32),tf.TensorSpec(shape=[None,outnum], dtype=tf.float32)])
def critrainPB(ddx:tf.Tensor,ddy:tf.Tensor):
    """One MSE training step on the shared-trunk variables only."""
    with tf.GradientTape() as gtape:
        pred = mypredictmodel(ddx,training=True)
        loss = tf.reduce_mean(tf.square(ddy-pred))
    crioptpb.minimize(loss,var_list=cripb_varlist,tape=gtape)
# In[ ]: output normalization
from sklearn.preprocessing import StandardScaler,MinMaxScaler,RobustScaler
yscaler=MinMaxScaler();# network is trained on min-max-scaled targets
lossstr=lossstr+type(yscaler).__name__;# record the scaler choice in the run tag
MMdatay=yscaler.fit_transform(datay);
print('model'+str(modelno)+'batch'+str(prebatchsize)+'损失函数'+lossstr+'学习率'+"{:.2e}".format(predlr))
# In[ ]:    
if modelload:
    # Restore a previously saved checkpoint instead of training.
    specname=str(modelno)+'ORIGINBATCH'+str(prebatchsize)+'POCH'+str(loadpoch)+lossstr+'lR'+"{:.2e}".format(predlr)
    mypredictmodel.load_weights('mypredictmodel'+specname)
else:
    with open('netloss.txt','w')as f:
        pass
    #### track the historical minimum residual to decide whether to stop / save
    ndatay=yscaler.inverse_transform(mypredictmodel(alldatax).numpy());# de-normalize the outputs
    wholeloss=np.mean(np.square(ndatay-alldatay),axis=(0));
    lossmin=wholeloss;# historical minimum residual
    #### track the historical minimum residual to decide whether to stop / save
    wholepoch=0; 
    for ii in range(trainno):
        # Draw a fresh random training subset, plus sub-subsets for the trunk and branch passes.
        rindex=np.random.choice(trainnum,trainbnum,replace=False);
        bdatax=datax[rindex,:];
        bdatay=MMdatay[rindex,:];
        rindexpb=np.random.choice(trainbnum,trainbnumpb,replace=False);
        bdataxpb=bdatax[rindexpb,:];
        bdataypb=bdatay[rindexpb,:];
        rindexwt=np.random.choice(trainbnum,trainbnumwt,replace=False);
        bdataxwt=bdatax[rindexwt,:];
        bdataywt=bdatay[rindexwt,:];
        for jj in range(choosepoch):
            valuetraindatas = tf.data.Dataset.from_tensor_slices((bdatax,bdatay)).batch(prebatchsize);# split into minibatches
            valuetraindataspb = tf.data.Dataset.from_tensor_slices((bdataxpb,bdataypb)).batch(prebatchsizepb);# split into minibatches
            valuetraindataswt= tf.data.Dataset.from_tensor_slices((bdataxwt,bdataywt)).batch(prebatchsizewt);# split into minibatches
            # Whole-network pass, then shared-trunk pass, then per-output fine-tune pass.
            for eno in range(yxpoch):
                for data in valuetraindatas:
                    critrain(data[0],data[1])   
            for eno in range(pbpoch):
                for data in valuetraindataspb:                 
                    critrainPB(data[0],data[1]);
                for pw in range(pwratio):                                    
                    for data in valuetraindataswt:                 
                        critrainWT(data[0],data[1]);            
        wholepoch=wholepoch+addpoch;
        
        if ((ii+1)%wlpoch==0):
            # Periodic evaluation on the test set (per-output mean-squared and max-absolute error).
            specname=str(modelno)+'ORIGINBATCH'+str(prebatchsize)+'POCH'+str(wholepoch)+lossstr+'lR'+"{:.2e}".format(predlr)
            ndatay=yscaler.inverse_transform(mypredictmodel(alldatax).numpy());# de-normalize the outputs
            wholeloss=np.mean(np.square(ndatay-alldatay),axis=(0));
            maxloss=np.max(np.abs(ndatay-alldatay),axis=(0))    
            # np.savetxt(specname+'FFLOSS'+'.txt',np.concatenate((ndatay,alldatay),axis=-1),fmt='%.5g')
            with open('netloss.txt','a')as f:
                np.savetxt(f,[np.concatenate((np.array([wholepoch]),wholeloss,maxloss),axis=-1)],fmt='%.5g');# append the evaluation record for this checkpoint
            print('iter'+str(wholepoch)+'test'+str(wholeloss)+';MAX'+str(maxloss));
            # Divergence checks (disabled): compare to historical best / value range and break if diverged.
            # if np.sum(wholeloss)>2*np.sum(lossmin):
            # #用绝对误差是否超范围判断是否发散
            # # if np.sum(maxloss)>5*np.sum(np.max(alldatay,axis=0)-np.min(np.concatenate([np.min(alldatay,axis=0,keepdims=True),np.zeros_like(np.min(alldatay,axis=0,keepdims=True))],axis=0),axis=0)):
            #     # 网络已经发散,直接跳出
            #     break;  
            if ((ii+1)%savepoch==0)|(np.sum(wholeloss)<=np.sum(lossmin)):
                # Save whenever the loss improves on the historical best, or periodically.
                mypredictmodel.save_weights('mypredictmodel'+specname)
                lossmin=wholeloss          
# In[ ]:
da=alldatay[:,0];
db=datay[:,0];
ds=np.setdiff1d(da,db);# select samples absent from the training set for testing
ds=da;# NOTE(review): this overwrites the setdiff above, so the test sample may also be in the training set — confirm intended
w1=np.where(da==ds[np.random.choice(ds.shape[0],1,replace=False)]);# index of a random goal sample
w2=np.random.choice(alldatax.shape[0],1,replace=False);# index of a random starting-guess sample
rindex=np.array([w1[0][0],w2[0]])
# In[ ]: use the trained differentiable surrogate network to solve for trajectory
# parameters that hit specified terminal longitude/latitude, time, etc.
# Optionally export the model to tf.lite format for demonstration:
# converter = tf.lite.TFLiteConverter.from_keras_model(mypredictmodel)
# tflite_model = converter.convert()
# open(specname+'.tflite', 'wb').write(tflite_model)

dataymax=tf.constant(yscaler.data_max_,dtype=tf.float32);# scaler max, for de-normalizing network outputs
dataymin=tf.constant(yscaler.data_min_,dtype=tf.float32);# scaler min, for de-normalizing network outputs
iterpoch=2500;# iteration count for every solver below

consnum=datay.shape[-1];# number of constraint quantities

state0=tf.constant(alldatax[[rindex[0]],:obnum],dtype=tf.float32);# initial state of the goal sample
goal=tf.constant(alldatay[[rindex[0]],:],dtype=tf.float32);# target terminal values

input0=tf.constant(alldatax[[rindex[1]],obnum:],dtype=tf.float32);# random starting guess for the decision variables
inputgoal=tf.constant(alldatax[[rindex[0]],obnum:],dtype=tf.float32);# a known feasible parameter vector (for reference)
maxerror=tf.abs(goal)*0.01;# constraint tolerance (1% of each target)           
# In[ ]: plain gradient descent on the squared terminal error
methodname='GRAD'

myinput=tf.Variable(input0,dtype=tf.float32)
inputhis5=[];# history of the decision variables
outhis5=[];# history of predicted terminal states
errorhis5=[];# history of terminal errors
iterlr=1e-5;# iteration learning rate
iteropt=tf.keras.optimizers.Adam(learning_rate=iterlr)   
 
for iterno in range(iterpoch):
    with tf.GradientTape(persistent=True) as tape:        
        # Surrogate forward pass, de-normalized back to physical units.
        out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
        error=out-goal;
        totalloss=tf.reduce_mean(tf.square(error));# objective: match target longitude/latitude/time
    totalgrad=tape.gradient(totalloss,myinput)
    iteropt.apply_gradients(zip([totalgrad],[myinput]))
    del tape

    # Re-evaluate after the update for logging and the stop test.
    out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
    error=out-goal;
           
    inputhis5.append(myinput.numpy());
    outhis5.append(out.numpy());
    errorhis5.append(error.numpy())
    
    if tf.reduce_sum(tf.square(error))<1e-6:
        break

# Terminal error predicted by the network.
out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
error=out-goal;
# True terminal error from the dynamics DLL.
lib.State_set(np.ctypeslib.as_ctypes((np.squeeze(state0.numpy())).astype(np.float64)));
Fstate=np.ctypeslib.as_array(lib.Agent_stepForward(np.ctypeslib.as_ctypes((np.squeeze(myinput.numpy())).astype(np.float64))), shape=[valuenum]).astype(np.float32);
realerror=transtate(Fstate)-goal.numpy();
planresult=np.transpose(np.concatenate((goal.numpy(),realerror,error.numpy()),axis=0))
for s in ['inputhis5','outhis5','errorhis5']:    
    np.savetxt(s+methodname+'.txt',np.squeeze(np.array(eval(s))),fmt='%.5g')  

for s in ['planresult']:    
    np.savetxt(s+methodname+'.txt',eval(s),fmt='%.5g')         
# In[ ]:增广拉格朗日乘子法
methodname='LARG'
with open('finallarghis.txt','w')as f:
    pass
myinput=tf.Variable(input0,dtype=tf.float32)
inputhis5=[];# history of the decision variables
outhis5=[];# history of predicted terminal states
errorhis5=[];# history of terminal errors
iterlr=1e-5;# iteration learning rate
initlarglambda=0.1*tf.ones(goal.shape);# initial Lagrange multipliers
larglambda=tf.Variable(initlarglambda);# Lagrange multipliers
larglambdamax=tf.constant(1000000.0);# cap on the sum of the multipliers
largnorm=tf.constant(0.1);# when the cap is exceeded, rescale to largnorm times the cap
larglr0=tf.constant(0.001);# initial augmented-Lagrangian penalty coefficient
larglr=tf.Variable(larglr0);# augmented-Lagrangian penalty coefficient
larglrmax=tf.constant(1.0);# cap on the penalty coefficient
largbound=tf.constant(0.999999);# required per-iteration constraint decrease ratio
largeta=tf.constant(1.001);# penalty growth factor applied when constraints stall
iteropt=tf.keras.optimizers.Adam(learning_rate=iterlr)   
 
for iterno in range(iterpoch):
    # NOTE(review): on the first iteration `error` still holds the value left over
    # from the GRAD section above — confirm this carry-over is intended.
    clossold=tf.abs(error)-maxerror;
    consign=tf.cast(tf.greater(clossold+larglambda/larglr,0.0),tf.float32);# active-constraint mask: is c+lambda/mu<=0?    
    ############# gradient step on the augmented Lagrangian
    with tf.GradientTape(persistent=True) as tape:        
        out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
        error=out-goal;
        rloss=-tf.reduce_sum(tf.square(myinput));# minimize the control-effort sum of squares
        closs=tf.abs(error)-maxerror;              
        totalloss=(-rloss+tf.reduce_sum((larglambda*closs+larglr*tf.square(closs)/2)*consign));
    totalgrad=tape.gradient(totalloss,myinput)
    iteropt.apply_gradients(zip([totalgrad],[myinput]))
    del tape

    out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
    error=out-goal;
    closs=tf.abs(error)-maxerror;    
    ############# Lagrange multiplier update
    larglambdaold=larglambda.numpy();# multipliers before the update

    # Multiplier ascent step; multipliers stay non-negative.
    larglambda.assign(tf.math.maximum(0.0,larglambda+larglr*closs));
    if tf.reduce_sum(larglambda)>larglambdamax:# rescale over-large multipliers back into range
        larglambda.assign(larglambda*largnorm*larglambdamax/tf.reduce_sum(larglambda));
    # Penalty-coefficient update.
    if (np.sum(closs)>0)&(np.sum(closs*larglambdaold)>largbound*np.sum(clossold*larglambdaold)):
        larglr.assign(larglr*largeta);# constraints not decreasing: raise the penalty weight
    larglr.assign(tf.math.minimum(larglr,larglrmax));# cap the penalty weight
    with open('finallarghis.txt','a')as f:
        np.savetxt(f,[np.append(larglambda.numpy(),larglr)],fmt='%.5g');# log the multipliers and the penalty weight             
    ############# record history            
    inputhis5.append(myinput.numpy());
    outhis5.append(out.numpy());
    errorhis5.append(error.numpy())
    
    if tf.reduce_sum(tf.square(error))<1e-6:
        break

# Terminal error predicted by the network.
out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
error=out-goal;
# True terminal error from the dynamics DLL.
lib.State_set(np.ctypeslib.as_ctypes((np.squeeze(state0.numpy())).astype(np.float64)));
Fstate=np.ctypeslib.as_array(lib.Agent_stepForward(np.ctypeslib.as_ctypes((np.squeeze(myinput.numpy())).astype(np.float64))), shape=[valuenum]).astype(np.float32);
realerror=transtate(Fstate)-goal.numpy();
planresult=np.transpose(np.concatenate((goal.numpy(),realerror,error.numpy()),axis=0))
for s in ['inputhis5','outhis5','errorhis5']:    
    np.savetxt(s+methodname+'.txt',np.squeeze(np.array(eval(s))),fmt='%.5g')  

for s in ['planresult']:    
    np.savetxt(s+methodname+'.txt',eval(s),fmt='%.5g')
# In[ ]:增广拉格朗日乘子法,约束值归一化(除以约束值的标准差模),相当于强化学习中lvariance9,avariance9
methodname='LARGV9'
with open('finallarghis.txt','w')as f:
    pass
myinput=tf.Variable(input0,dtype=tf.float32)
inputhis5=[];# history of the decision variables
outhis5=[];# history of predicted terminal states
errorhis5=[];# history of terminal errors
iterlr=5e-4;# iteration learning rate
QCgreedy=0.01;# soft-update ratio for the constraint statistic
initlarglambda=0.1*tf.ones(goal.shape);# initial Lagrange multipliers
larglambda=tf.Variable(initlarglambda);# Lagrange multipliers
larglambdamax=tf.constant(1000000.0);# cap on the sum of the multipliers
largnorm=tf.constant(0.1);# when the cap is exceeded, rescale to largnorm times the cap
larglr0=tf.constant(0.001);# initial augmented-Lagrangian penalty coefficient
larglr=tf.Variable(larglr0);# augmented-Lagrangian penalty coefficient
larglrmax=tf.constant(1.0);# cap on the penalty coefficient
largbound=tf.constant(0.999999);# required per-iteration constraint decrease ratio
largeta=tf.constant(1.001);# penalty growth factor applied when constraints stall
iteropt=tf.keras.optimizers.Adam(learning_rate=iterlr)   

# Evaluate once before the loop so `error`/`errormen` start from the current guess.
out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
error=out-goal;
errormen=error;# momentum-smoothed error
errornorm=tf.maximum(tf.norm(errormen),0.1)*tf.ones_like(error);# normalizer: norm of the smoothed error, floored at 0.1
def softchange_QCstd(errornew, errorm):
    """Soft update of the constraint-error statistic (QCstd).

    Returns an exponential moving average: the running value ``errorm`` is
    blended toward the fresh error ``errornew`` with mixing weight
    ``QCgreedy`` (module-level soft-update ratio)."""
    blended = (1-QCgreedy)*errorm+QCgreedy*errornew
    return blended

for iterno in range(iterpoch):
    # Update the smoothed error and its norm-based normalizer (vector norm, floored at 0.1).
    errormen=softchange_QCstd(error,errormen);
    errornorm=tf.maximum(tf.norm(errormen),0.1)*tf.ones_like(error);
    clossold=(tf.abs(error)-maxerror)/errornorm;# normalized constraint values used for the multiplier update
    consign=tf.cast(tf.greater(clossold+larglambda/larglr,0.0),tf.float32);# active-constraint mask: is c+lambda/mu<=0?    
    ############# gradient step on the augmented Lagrangian
    with tf.GradientTape(persistent=True) as tape:        
        out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
        error=(out-goal);
        rloss=-tf.reduce_sum(tf.square(myinput));# minimize the control-effort sum of squares
        closs=(tf.abs(error)-maxerror)/errornorm;# constraints normalized for the gradient               
        totalloss=(-rloss+tf.reduce_sum((larglambda*closs+larglr*tf.square(closs)/2)*consign));
    totalgrad=tape.gradient(totalloss,myinput)
    iteropt.apply_gradients(zip([totalgrad],[myinput]))
    del tape

    out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
    error=out-goal;
    closs=(tf.abs(error)-maxerror)/errornorm;# normalized constraint values for the multiplier update    
    ############# Lagrange multiplier update
    larglambdaold=larglambda.numpy();# multipliers before the update

    # Multiplier ascent step; multipliers stay non-negative.
    larglambda.assign(tf.math.maximum(0.0,larglambda+larglr*closs));
    if tf.reduce_sum(larglambda)>larglambdamax:# rescale over-large multipliers back into range
        larglambda.assign(larglambda*largnorm*larglambdamax/tf.reduce_sum(larglambda));
    # Penalty-coefficient update.
    if (np.sum(closs)>0)&(np.sum(closs*larglambdaold)>largbound*np.sum(clossold*larglambdaold)):
        larglr.assign(larglr*largeta);# constraints not decreasing: raise the penalty weight
    larglr.assign(tf.math.minimum(larglr,larglrmax));# cap the penalty weight
    with open('finallarghis.txt','a')as f:
        np.savetxt(f,[np.append(larglambda.numpy(),larglr)],fmt='%.5g');# log the multipliers and the penalty weight             
    ############# record history            
    inputhis5.append(myinput.numpy());
    outhis5.append(out.numpy());
    errorhis5.append(error.numpy())
    
    if tf.reduce_sum(tf.square(error))<1e-6:
        break

# Terminal error predicted by the network.
out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
error=out-goal;
# True terminal error from the dynamics DLL.
lib.State_set(np.ctypeslib.as_ctypes((np.squeeze(state0.numpy())).astype(np.float64)));
Fstate=np.ctypeslib.as_array(lib.Agent_stepForward(np.ctypeslib.as_ctypes((np.squeeze(myinput.numpy())).astype(np.float64))), shape=[valuenum]).astype(np.float32);
realerror=transtate(Fstate)-goal.numpy();
planresult=np.transpose(np.concatenate((goal.numpy(),realerror,error.numpy()),axis=0))
for s in ['inputhis5','outhis5','errorhis5']:    
    np.savetxt(s+methodname+'.txt',np.squeeze(np.array(eval(s))),fmt='%.5g')  

for s in ['planresult']:    
    np.savetxt(s+methodname+'.txt',eval(s),fmt='%.5g')
# In[ ]:增广拉格朗日乘子法,约束值归一化(除以标准差),相当于强化学习中lvariance7,avariance6
methodname='LARGV7'
with open('finallarghis.txt','w')as f:
    pass
myinput=tf.Variable(input0,dtype=tf.float32)
inputhis5=[];# history of the decision variables
outhis5=[];# history of predicted terminal states
errorhis5=[];# history of terminal errors
iterlr=5e-4;# iteration learning rate
QCgreedy=0.01;# soft-update ratio for the constraint statistic
initlarglambda=0.1*tf.ones(goal.shape);# initial Lagrange multipliers
larglambda=tf.Variable(initlarglambda);# Lagrange multipliers
larglambdamax=tf.constant(1000000.0);# cap on the sum of the multipliers
largnorm=tf.constant(0.1);# when the cap is exceeded, rescale to largnorm times the cap
larglr0=tf.constant(0.001);# initial augmented-Lagrangian penalty coefficient
larglr=tf.Variable(larglr0);# augmented-Lagrangian penalty coefficient
larglrmax=tf.constant(1.0);# cap on the penalty coefficient
largbound=tf.constant(0.999999);# required per-iteration constraint decrease ratio
largeta=tf.constant(1.001);# penalty growth factor applied when constraints stall
iteropt=tf.keras.optimizers.Adam(learning_rate=iterlr)   

# Evaluate once before the loop so `error`/`errormen` start from the current guess.
out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
error=out-goal;
errormen=error;# momentum-smoothed error
errornorm=tf.abs(errormen);# per-component normalizer — NOTE(review): no floor here, so a near-zero component can blow up the division below
def softchange_QCstd(errornew, errorm):
    """Soft-update the constraint-error statistic: exponential moving average
    of ``errorm`` toward ``errornew`` with ratio ``QCgreedy``."""
    errorm = (1-QCgreedy)*errorm+QCgreedy*errornew
    return errorm

for iterno in range(iterpoch):
    # Update the smoothed error; normalizer is the per-component magnitude (no floor).
    errormen=softchange_QCstd(error,errormen);
    errornorm=tf.abs(errormen);
    clossold=(tf.abs(error)-maxerror)/errornorm;# normalized constraint values used for the multiplier update
    consign=tf.cast(tf.greater(clossold+larglambda/larglr,0.0),tf.float32);# active-constraint mask: is c+lambda/mu<=0?    
    ############# gradient step on the augmented Lagrangian
    with tf.GradientTape(persistent=True) as tape:        
        out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
        error=(out-goal);
        rloss=-tf.reduce_sum(tf.square(myinput));# minimize the control-effort sum of squares
        closs=(tf.abs(error)-maxerror)/errornorm;# constraints normalized for the gradient ;             
        totalloss=(-rloss+tf.reduce_sum((larglambda*closs+larglr*tf.square(closs)/2)*consign));
    totalgrad=tape.gradient(totalloss,myinput)
    iteropt.apply_gradients(zip([totalgrad],[myinput]))
    del tape

    out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
    error=out-goal;
    closs=(tf.abs(error)-maxerror)/errornorm;# normalized constraint values for the multiplier update    
    ############# Lagrange multiplier update
    larglambdaold=larglambda.numpy();# multipliers before the update

    # Multiplier ascent step; multipliers stay non-negative.
    larglambda.assign(tf.math.maximum(0.0,larglambda+larglr*closs));
    if tf.reduce_sum(larglambda)>larglambdamax:# rescale over-large multipliers back into range
        larglambda.assign(larglambda*largnorm*larglambdamax/tf.reduce_sum(larglambda));
    # Penalty-coefficient update.
    if (np.sum(closs)>0)&(np.sum(closs*larglambdaold)>largbound*np.sum(clossold*larglambdaold)):
        larglr.assign(larglr*largeta);# constraints not decreasing: raise the penalty weight
    larglr.assign(tf.math.minimum(larglr,larglrmax));# cap the penalty weight
    with open('finallarghis.txt','a')as f:
        np.savetxt(f,[np.append(larglambda.numpy(),larglr)],fmt='%.5g');# log the multipliers and the penalty weight             
    ############# record history            
    inputhis5.append(myinput.numpy());
    outhis5.append(out.numpy());
    errorhis5.append(error.numpy())
    
    if tf.reduce_sum(tf.square(error))<1e-6:
        break

# Terminal error predicted by the network.
out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
error=out-goal;
# True terminal error from the dynamics DLL.
lib.State_set(np.ctypeslib.as_ctypes((np.squeeze(state0.numpy())).astype(np.float64)));
Fstate=np.ctypeslib.as_array(lib.Agent_stepForward(np.ctypeslib.as_ctypes((np.squeeze(myinput.numpy())).astype(np.float64))), shape=[valuenum]).astype(np.float32);
realerror=transtate(Fstate)-goal.numpy();
planresult=np.transpose(np.concatenate((goal.numpy(),realerror,error.numpy()),axis=0))
for s in ['inputhis5','outhis5','errorhis5']:    
    np.savetxt(s+methodname+'.txt',np.squeeze(np.array(eval(s))),fmt='%.5g')  

for s in ['planresult']:    
    np.savetxt(s+methodname+'.txt',eval(s),fmt='%.5g')
# In[ ]: augmented Lagrangian with constraints normalized by (norm of the smoothed error / number of constraints) — counterpart of lvariance10/avariance10 in the RL code; kept for reference, currently disabled
# out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;    
# error=out-goal;
# errormen=softchange_QCstd(error,errormen);
# errornorm=tf.maximum(tf.norm(errormen)/consnum,0.1)*tf.ones_like(error);
# clossold=(tf.abs(error)-maxerror)/errornorm;#用于计算乘子时的数归一化
# consign=tf.cast(tf.greater(clossold+larglambda/larglr,0.0),tf.float32);#是否c+lambda/miu<=0    
# #############梯度下降法优化
# with tf.GradientTape(persistent=True) as tape:        
#     out=mypredictmodel(tf.concat((state0,myinput),axis=-1))*(dataymax-dataymin)+dataymin;
#     error=(out-goal);
#     rloss=-tf.reduce_sum(tf.square(myinput));#使控制量均方和最小
#     closs=(tf.abs(error)-maxerror)/errornorm;#计算梯度时约束值归一化              
#     totalloss=(-rloss+tf.reduce_sum((larglambda*closs+larglr*tf.square(closs)/2)*consign));
# totalgrad=tape.gradient(totalloss,myinput)
# iteropt.apply_gradients(zip([totalgrad],[myinput]))
# del tape