import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

class SimplePVZEnv:
    """Toy Plants-vs-Zombies-style environment on a 10x10 grid.

    The state is a 10x10 numpy array; there are 5 discrete actions.
    An episode terminates when the grid's sum exceeds 5 or after
    ``max_steps`` steps, whichever comes first.
    """

    def __init__(self):
        # State space: a 10x10 matrix representing the game board.
        self.state = np.zeros((10, 10))
        # Action space: 5 discrete actions (indices 0..4).
        self.action_space = 5
        self.max_steps = 100  # hard cap on episode length
        self.current_step = 0

    def reset(self):
        """Reset the board to all zeros and return the fresh state."""
        self.state = np.zeros((10, 10))
        self.current_step = 0
        return self.state

    def step(self, action):
        """Apply *action* and return ``(state, reward, done)``.

        Action 0 marks cell (0, 0) and yields reward +1; every other
        action leaves the board unchanged and yields reward -1.
        """
        if action == 0:
            self.state[0, 0] = 1
            reward = 1
        else:
            reward = -1

        # Count this step BEFORE the termination check so an episode lasts
        # exactly max_steps steps (the old order allowed one extra step,
        # since the pre-increment counter was compared against the cap).
        self.current_step += 1
        done = np.sum(self.state) > 5 or self.current_step >= self.max_steps
        return self.state, reward, done

def build_dqn_model(state_shape, action_space):
    """Construct and compile a small MLP that maps a flattened state
    to one Q-value per action.

    Args:
        state_shape: tuple, shape of the (flattened) input state.
        action_space: int, number of discrete actions / output units.

    Returns:
        A compiled ``tf.keras`` Sequential model (Adam, lr=0.001, MSE loss).
    """
    net = Sequential()
    # Two hidden ReLU layers of 32 units each.
    net.add(Dense(32, activation='relu', input_shape=state_shape))
    net.add(Dense(32, activation='relu'))
    # Linear head: raw Q-value estimates, one per action.
    net.add(Dense(action_space, activation='linear'))
    net.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
        loss='mse',
    )
    return net

def train_dqn(env, model, num_episodes=100, gamma=0.95, epsilon=0.1):
    """Train *model* on *env* with a minimal online DQN loop.

    Args:
        env: environment exposing ``reset() -> state``,
            ``step(action) -> (next_state, reward, done)``, and an
            integer ``action_space`` attribute.
        model: Keras-style model mapping a flattened state (batch of 1)
            to per-action Q-values; must support ``predict`` and ``fit``.
        num_episodes: number of episodes to run.
        gamma: discount factor for future rewards.
        epsilon: probability of taking a random (exploratory) action.
    """
    for episode in range(num_episodes):
        state = env.reset()
        state = state.flatten()  # flatten the grid for the Dense input
        done = False
        while not done:
            # Q-values for the current state are always needed below to
            # build the training target, so predict them regardless of how
            # the action is chosen. (Previously they were only computed on
            # the greedy branch, leaving q_values undefined — a NameError —
            # whenever the first action of a run was exploratory.)
            q_values = model.predict(state.reshape(1, -1), verbose=0)

            # Epsilon-greedy action selection.
            if np.random.rand() < epsilon:
                action = np.random.randint(env.action_space)
            else:
                action = int(np.argmax(q_values))

            # Advance the environment one step.
            next_state, reward, done = env.step(action)
            next_state = next_state.flatten()

            # Bellman target: r + gamma * max_a' Q(s', a') for
            # non-terminal next states; just r at episode end.
            target = reward
            if not done:
                next_q_values = model.predict(next_state.reshape(1, -1), verbose=0)
                target += gamma * np.max(next_q_values)

            # Only the chosen action's output is pushed toward the target;
            # the other actions keep their current predictions.
            target_q_values = q_values.copy()
            target_q_values[0][action] = target

            # One gradient step on this single transition.
            model.fit(state.reshape(1, -1), target_q_values, epochs=1, verbose=0)

            state = next_state

        if episode % 10 == 0:
            print(f"Episode {episode} completed.")

if __name__ == "__main__":
    env = SimplePVZEnv()
    state_shape = (10 * 10,)  # 展平后的状态维度
    action_space = env.action_space
    model = build_dqn_model(state_shape, action_space)
    train_dqn(env, model)