#Author : AcerMo
#date 2024-11-18
from Train import game
'''
CartPole是gym库中一个经典的算例
要求我们左右水平移动小车来使车上的一根木棍保持竖直静止
状态描述参数为一个包括四个元素的向量[x,v,theta,omega]
能进行的动作有两个，左移或者右移
只要木棍偏向角不大于12，车辆不跑出边界就算存活，只要存活就会有奖励，
模型满足连续状态与离散动作空间
因此可以使用DQN来进行求解
'''
# Drive DQN training through the packaged network + trainer wrapper.
# CartPole observation is a 4-vector [x, v, theta, omega] and the action
# space has 2 discrete moves, hence insize=4 / outsize=2.
cartpole = game(
    max_iter=10000,
    memory_size=10000,
    e_greedy=0.1,
    name='CartPole-v1',
    insize=4,
    outsize=2,
    loss_fn='MSE',
    optimizer='Adam',
)
cartpole.train(-500, 150)

'''
#环境测试代码
env = gym.make('CartPole-v1', render_mode="human")
for episode in range(10):
    env.reset()
    print("Episode finished after {} timesteps".format(episode))
    for ik in range(10):
        env.render()
        observation, reward, done, truncated, info = env.step(env.action_space.sample())
        print(observation, reward, done, info)
        if done:
            break
        time.sleep(0.02)
env.close()
'''
'''
def train(memory_size, e_greedy,train_iter):
    avg = 0
    num_train = 0
    show_me = False
    #平滑每次训练的结果值，当训练效果较好时，开始进行展示
    while num_train < train_iter:
        state = env.reset()[0]
        s = torch.tensor(state, dtype=torch.float32)
        s = s.unsqueeze(0)
        QSA = net(s)
        Reward = 0
        while True:
            if show_me:
                env.render()
            #经验池还没有满，那么随机选择动作扩充经验池
            if len(memory) < memory_size:
                action = env.action_space.sample()
            else:
                #在探索次数逐渐增多的过程中，值应该越发收敛，这时应减少随机探索的概率
                e_greedy += 1e-7
                memory.pop(0)
                #删除最早的经验
                #以一定概率采取随机策略
                if random.random() < e_greedy:
                    action = env.action_space.sample()
                else :
                    s = torch.tensor(state, dtype=torch.float32)
                    s = s.unsqueeze(0)
                    QSA = net(s)
                    action = torch.argmax(QSA, 1)[0].item()
            new_state, reward, done, _ , info= env.step(action)
            Reward += reward
            memory.append((s, action, reward, new_state, done))
            state = new_state
            if done:
                num_train += 1
                avg = 0.95 * avg + 0.05 * Reward
                if avg > 300000:
                    show_me = True
                break
        if len(memory) < memory_size:
            continue
        exps = random.choices(memory, k=1000)
        #选取1000条历史经验
        #状态转换成1000*4的张量
        states = torch.tensor([exp[0] for exp in exps]).float()
        nstates = torch.tensor([exp[3] for exp in exps]).float()
        #其他值转化为100*1的张量
        rewards = torch.tensor([[exp[2]] for exp in exps])
        actions = torch.tensor([[exp[1]] for exp in exps])
        dones = torch.tensor([[exp[4]] for exp in exps])

        QSAs = net(states)
        Qs = torch.gather(QSAs, 1, actions)
        #这里指取出actions对应的维度的结果值

        NQSAs = net(nstates)
        maxQ = torch.max(NQSAs, dim=1, keepdim=True)[0]
        target_Q = rewards + (1.0 - dones) * maxQ * 0.9

        loss = loss_fn(Qs, target_Q.detach())
        #detach会避免target的梯度进行传播
        opt.zero_grad()
        loss.backward()
        opt.step()
    return

env = gym.make('CartPole-v1', render_mode="human")
net = QNet(4,2)
loss_fn = nn.MSELoss()
opt = optim.Adam(net.parameters())

memory = []
memory_size = 100000
e_greedy = 0.2
train_iter = 100000
train(memory_size, e_greedy,train_iter)

'''