import numpy as np 

#import gym 
# Maximum number of evolution generations (outer-loop iterations).
generations = 1000
# Number of perturbed candidates (population members) per generation.
population_size = 100
# Observation vector size: relative spatial state 3x4 + enemy-alive count 1
# + own-alive count 1 + own ammo count 1 = 15.
observation_size = 15

# Number of our weapons (SAN).
SAN_NUMBER = 4

# Number of UAVs still alive.
UAV_Alive = 4

# Whether our own side is still alive.
IsAlive = True
# Terminal reward: different termination conditions yield different payoffs.
def R_Done(is_alive=None, uav_alive=None):
    """Return the terminal (end-of-episode) reward.

    Rewards our side's survival (+1 when alive) and penalizes every UAV
    that is still alive (-0.1 each).

    Args:
        is_alive: Survival flag; defaults to the module-level ``IsAlive``.
        uav_alive: Count of UAVs still alive; defaults to the
            module-level ``UAV_Alive``.

    Returns:
        float: ``1 * is_alive - 0.1 * uav_alive``.
    """
    if is_alive is None:
        is_alive = IsAlive
    if uav_alive is None:
        uav_alive = UAV_Alive
    return 1 * is_alive - 0.1 * uav_alive

def relu(x):
    """Element-wise rectified linear unit: keeps positive values, zeroes the rest."""
    return np.maximum(x, 0)

def model(inp, weights):
    """Forward pass of a two-layer feed-forward network.

    Projects the input through the first weight matrix, applies a ReLU
    non-linearity, then projects through the second weight matrix and
    returns the raw output scores.
    """
    hidden = np.dot(inp, weights[0])
    hidden = (hidden > 0) * hidden  # ReLU, inlined so the block is self-contained
    return np.dot(hidden, weights[1])

def main():
    """Train a 15->35->5 policy network with a simple evolution strategy.

    Each generation, ``population_size`` Gaussian perturbations of the base
    weights are evaluated for one episode each; the base weights are then
    moved along the reward-weighted average of the noise, with a step size
    that decays linearly over the generations, and checkpointed to disk.

    NOTE(review): ``env`` is not defined anywhere in this file — the gym
    import and environment construction are commented out.  This function
    raises ``NameError`` until an environment exposing the classic
    ``reset()``/``step()`` API is created below.
    """
    global IsAlive, UAV_Alive  # updated from terminal info once its schema is known

    # env = gym.make('CartPole-v0')  # TODO: construct the real environment here.

    # Base network weights (15 -> 35 -> 5); small random initialization.
    base_weight = [0.01 * np.random.randn(15, 35), 0.01 * np.random.randn(35, 5)]
    # Per-member Gaussian noise (scale 0.2); the population starts as noise
    # around the base weights.
    gaussian = [[np.random.randn(15, 35) * 0.2, np.random.randn(35, 5) * 0.2]
                for _ in range(population_size)]
    # Perturbed weights: one noisy copy of the base weights per member.
    perturbed_weights = [[gaussian[a][0] + base_weight[0],
                          gaussian[a][1] + base_weight[1]]
                         for a in range(population_size)]

    # Main loop: evaluate the population against the environment each generation.
    for generation in range(generations):
        # Episode return of every population member this generation.
        accumulated_rewards = []

        for agent in range(population_size):
            w = perturbed_weights[agent]
            r = 0.0

            # Start an episode and get the initial observation.
            observation = env.reset()
            for t in range(200):
                # Render every 100th generation, every 10th member:
                # if generation % 100 == 0 and agent % 10 == 0:
                #     env.render()

                # Greedy action: index of the largest network output.
                action = np.argmax(model(observation, w))

                # Step the environment; per-step reward is scaled down.
                observation, reward, done, info = env.step(action)
                r += reward / 10.0

                if done:
                    # TODO(review): derive these from `done`/`info` — the
                    # terminal-info schema is not visible here, so the
                    # previous (syntactically invalid) assignments are left
                    # as placeholders:
                    # IsAlive = ...
                    # UAV_Alive = ...
                    r -= R_Done()
                    break

            accumulated_rewards.append(r)

        # Mean return over the population (was recomputed per-agent and
        # printed with a truncated message in the original).
        average_r = sum(accumulated_rewards) / population_size
        print("average reward=%f" % average_r)

        # Reward-weighted average of the noise, per layer.
        average_1 = 0
        average_2 = 0
        for agent in range(population_size):
            average_1 += accumulated_rewards[agent] * gaussian[agent][0]
            average_2 += accumulated_rewards[agent] * gaussian[agent][1]
        average_1 /= population_size
        average_2 /= population_size

        # Gradient-ascent-style update with a linearly decaying step size.
        base_weight[0] = base_weight[0] + 0.02 * average_1 * (generations - generation) / generations
        base_weight[1] = base_weight[1] + 0.02 * average_2 * (generations - generation) / generations

        # Checkpoint the current base weights.
        np.save('baseweight_0c.npy', base_weight[0])
        np.save('baseweight_1c.npy', base_weight[1])

        # Resample noise (smaller scale than the initial population, as in
        # the original code) and rebuild the perturbed population.
        gaussian = [[np.random.randn(15, 35) * 0.03, np.random.randn(35, 5) * 0.03]
                    for _ in range(population_size)]
        perturbed_weights = [[gaussian[a][0] + base_weight[0],
                              gaussian[a][1] + base_weight[1]]
                             for a in range(population_size)]

main()
