import random

import gym
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models

class DQN(models.Model):
    """Fully connected Q-network mapping a state batch to per-action Q-values.

    Architecture: two 64-unit ReLU hidden layers followed by a linear
    output layer with one unit per action.
    """

    def __init__(self, state_dim, action_dim):
        super(DQN, self).__init__()
        # Build the same stack incrementally instead of via a list literal.
        net = models.Sequential()
        net.add(layers.Dense(64, activation='relu', input_dim=state_dim))
        net.add(layers.Dense(64, activation='relu'))
        net.add(layers.Dense(action_dim))
        self.model = net

    def call(self, state):
        """Forward pass: returns Q-values of shape (batch, action_dim)."""
        q_values = self.model(state)
        return q_values

# Environment and training setup.
env = gym.make('CartPole-v1')
# CartPole-v1: 4-dimensional observation, 2 discrete actions.
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n

dqn = DQN(state_dim, action_dim)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# Experience replay buffer of (state, action, reward, next_state, done)
# tuples; grows without bound (no max-size eviction in this script).
replay_buffer = []

def train_dqn(episodes=100, gamma=0.99, batch_size=32, epsilon=0.1,
              min_buffer=1000):
    """Train the module-level `dqn` on CartPole with experience replay.

    Args:
        episodes: number of environment episodes to run.
        gamma: discount factor for bootstrapped targets.
        batch_size: minibatch size sampled from the replay buffer.
        epsilon: probability of taking a random exploratory action.
        min_buffer: minimum buffer size before learning updates start.

    Side effects: mutates the module-level `replay_buffer` and updates
    `dqn`'s weights via the module-level `optimizer`.
    """
    for episode in range(episodes):
        state = env.reset()
        done = False
        while not done:
            # Epsilon-greedy exploration: a pure argmax policy never
            # explores, so DQN cannot learn from it.
            if random.random() < epsilon:
                action = env.action_space.sample()
            else:
                obs = tf.expand_dims(tf.cast(state, tf.float32), axis=0)
                action = int(np.argmax(dqn(obs).numpy()))
            next_state, reward, done, _ = env.step(action)
            replay_buffer.append((state, action, reward, next_state, done))
            state = next_state

            if len(replay_buffer) > min_buffer:
                # np.random.choice cannot sample from a list of tuples
                # (it requires a 1-D array); random.sample works directly
                # and samples without replacement.
                batch = random.sample(replay_buffer, batch_size)
                states, actions, rewards, next_states, dones = zip(*batch)
                # Explicit dtypes: mixing float64 numpy arrays with the
                # network's float32 outputs raises a TF dtype error.
                states = np.array(states, dtype=np.float32)
                next_states = np.array(next_states, dtype=np.float32)
                actions = np.array(actions, dtype=np.int32)
                rewards = np.array(rewards, dtype=np.float32)
                dones = np.array(dones, dtype=np.float32)

                with tf.GradientTape() as tape:
                    q_values = dqn(states)
                    # The bootstrap target must not backpropagate into the
                    # network, hence stop_gradient.
                    next_q_values = tf.stop_gradient(dqn(next_states))
                    target_q_values = (
                        rewards
                        + gamma * tf.reduce_max(next_q_values, axis=1)
                        * (1.0 - dones)
                    )
                    # gather_nd needs (row, action) index pairs; passing
                    # bare action ids indexes the wrong elements.
                    idx = tf.stack(
                        [tf.range(batch_size, dtype=tf.int32), actions],
                        axis=1,
                    )
                    chosen_q = tf.gather_nd(q_values, idx)
                    loss = tf.reduce_mean(tf.square(target_q_values - chosen_q))

                gradients = tape.gradient(loss, dqn.trainable_variables)
                optimizer.apply_gradients(zip(gradients, dqn.trainable_variables))

# Guard the entry point so importing this module does not start training.
if __name__ == "__main__":
    train_dqn()