from OtherFortrain.SnakeGame import SnakeENV
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam

# Create the Snake game environment (project-local; assumed Gym-like API --
# reset()/step()/observation_space/action_space -- TODO confirm against SnakeEnv).
env = SnakeENV.SnakeEnv()

# Sizes of the state and action spaces.
# NOTE(review): state_size is only the FIRST axis of the observation space;
# if the observation is multi-dimensional this is not the full feature count
# (the model below assumes 10 * state_size inputs) -- verify against SnakeEnv.
state_size = env.observation_space.shape[0]
action_size = env.action_space.n

# DQN Q-network: a small fully-connected net mapping a flattened state
# vector to one Q-value per action (linear output, MSE loss).
# NOTE(review): the input is assumed to be 10 * state_size features wide,
# matching the (1, -1) reshape done in the training loop -- confirm against
# the actual observation shape of SnakeEnv.
model = Sequential([
    Dense(24, input_dim=10 * state_size, activation='relu'),
    Dense(24, activation='relu'),
    Dense(action_size, activation='linear'),
])
model.compile(loss='mse', optimizer=Adam(learning_rate=0.001))

# Training hyperparameters.
num_episodes = 10  # number of episodes to run (small; raise for real training)
batch_size = 32    # replay minibatch size
gamma = 0.95       # discount factor for bootstrapped future rewards

# Experience replay buffer
class ReplayBuffer:
    """Fixed-capacity ring buffer of (state, action, reward, next_state, done) transitions."""

    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored transitions
        self.buffer = []          # backing store; grows until full, then overwritten in place
        self.position = 0         # index of the next slot to (over)write

    def push(self, state, action, reward, next_state, done):
        """Store one transition, overwriting the oldest once the buffer is full."""
        transition = (state, action, reward, next_state, done)
        if len(self.buffer) < self.capacity:
            self.buffer.append(transition)
        else:
            self.buffer[self.position] = transition
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return a uniform random batch (sampled without replacement) as numpy arrays.

        Raises ValueError if batch_size exceeds the number of stored transitions.
        """
        indices = np.random.choice(len(self.buffer), batch_size, replace=False)
        picked = [self.buffer[i] for i in indices]
        states, actions, rewards, next_states, dones = zip(*picked)
        return (np.array(states), np.array(actions), np.array(rewards),
                np.array(next_states), np.array(dones))

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.buffer)

# Create the experience replay buffer (holds up to 10000 transitions).
replay_buffer = ReplayBuffer(capacity=10000)

# --- Training loop ---------------------------------------------------------
# Epsilon-greedy exploration schedule. The original code always took the
# argmax of an untrained network, which is fully deterministic and never
# explores; DQN needs random actions (especially early on) to fill the
# replay buffer with diverse transitions.
epsilon = 1.0          # initial exploration rate
epsilon_min = 0.01     # floor so a little exploration always remains
epsilon_decay = 0.995  # multiplicative decay applied once per episode

for episode in range(num_episodes):
    state = env.reset()
    # Flatten the observation to (1, features) for the Dense network.
    state = state.reshape(1, -1)
    done = False
    time_step = 0

    while not done:
        # Epsilon-greedy action selection (verbose=0 silences the Keras
        # per-call progress output, which would otherwise print every step).
        if np.random.rand() < epsilon:
            action = np.random.randint(action_size)
        else:
            action = np.argmax(model.predict(state, verbose=0)[0])

        # Step the environment.
        # NOTE(review): assumes the old Gym 4-tuple step API -- confirm.
        next_state, reward, done, _ = env.step(action)
        next_state = next_state.reshape(1, -1)

        # Store the transition for experience replay.
        replay_buffer.push(state, action, reward, next_state, done)

        state = next_state
        time_step += 1

        # Train once the buffer holds at least one full batch.
        if len(replay_buffer) >= batch_size:
            states, actions, rewards, next_states, dones = replay_buffer.sample(batch_size)

            # Stored states are (1, features); collapse to (batch, features).
            states = states.reshape(batch_size, -1)
            next_states = next_states.reshape(batch_size, -1)

            # Bellman targets; terminal transitions get no bootstrap term.
            # NOTE(review): no separate target network is used -- the online
            # network supplies the bootstrap values, as in the original code.
            targets = rewards + gamma * np.amax(
                model.predict(next_states, verbose=0), axis=1) * (1 - dones)

            # Replace only the Q-values of the actions actually taken.
            targets_f = model.predict(states, verbose=0)
            targets_f[np.arange(batch_size), actions] = targets
            model.fit(states, targets_f, epochs=1, verbose=0)

    # Decay exploration after each episode.
    epsilon = max(epsilon_min, epsilon * epsilon_decay)

    print(f"Episode: {episode + 1}/{num_episodes}, Score: {time_step}")

# Save the trained model (hard-coded absolute path kept from the original;
# consider making it configurable).
model.save('D:/Project/Pycharm Project/pythonProject4/models/snake_model.h5')

# Close the environment.
env.close()