from torch import nn, optim
import numpy as np
# Q-network: maps a state vector to one Q-value per action.
class QNet(nn.Sequential):
    """Feed-forward Q-value network.

    A three-layer MLP (input -> 256 -> 128 -> output) with ReLU
    activations between the linear layers. The output has one unit
    per action, interpreted as that action's Q-value.
    """

    def __init__(self, input_size, output_size):
        # Assemble the stack first, then hand it to nn.Sequential.
        layers = [
            nn.Linear(input_size, 256),   # input layer
            nn.ReLU(),
            nn.Linear(256, 128),          # hidden layer
            nn.ReLU(),
            nn.Linear(128, output_size),  # one Q-value per action
        ]
        super(QNet, self).__init__(*layers)

class Qtable():
    """Tabular Q-learning agent with epsilon-greedy action selection.

    Holds a (state_size, action_space) array of Q-values and updates it
    with the standard Q-learning rule:
        Q[s, a] += lr * (target - Q[s, a])
    where target = r + gamma * max_a' Q[s', a'] for non-terminal
    transitions and target = r at terminal ones.
    """

    def __init__(self, action_space, state_size, e_greedy=0.1, learning_rate=0.01, reward_decay=0.9):
        # action_space: number of discrete actions; state_size: number of discrete states.
        self.action_space = action_space
        self.state_size = state_size
        self.qtable = np.zeros((self.state_size, self.action_space))

        self.reward_decay = reward_decay    # discount factor gamma
        self.e_greedy = e_greedy            # exploration probability epsilon
        self.learning_rate = learning_rate

    def choose_action(self, observation):
        """Return an action index for `observation` (a state index).

        With probability e_greedy explore uniformly at random; otherwise
        exploit by picking a greedy action, breaking ties randomly.
        """
        if np.random.random() < self.e_greedy:
            action = np.random.choice(self.action_space)
        else:
            SA = self.qtable[observation]
            # Random tie-break among all maximal entries.
            action = np.random.choice(np.where(SA == np.max(SA))[0])
        return action

    def learn(self, state, action, reward, next_state, done):
        """One Q-learning update for the transition (state, action, reward, next_state).

        Fix: the original ignored `done`. A terminal transition must not
        bootstrap from next_state — its target is just the reward.
        """
        q_cur = self.qtable[state, action]
        if done:
            q_new = reward
        else:
            q_new = reward + self.reward_decay * np.max(self.qtable[next_state])
        self.qtable[state, action] += self.learning_rate * (q_new - q_cur)

