import pygame
import math
import gym
from gym import spaces
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim



class InvertedPendulumEnv(gym.Env):
    """Toy inverted-pendulum (cart-pole) environment with simplified dynamics.

    The observation is ``[cart_x, pole_angle]``; the action space is
    Discrete(2): 0 pushes the cart left, 1 pushes it right.

    Note: ``step`` returns a 3-tuple ``(state, reward, done)`` instead of the
    standard gym 4-tuple — the training loop in this file unpacks exactly
    three values, so the signature is kept for compatibility.
    """

    def __init__(self):
        super(InvertedPendulumEnv, self).__init__()

        self.screen_width = 400          # rendering width in pixels
        self.cart_width = 60
        self.cart_range = 2.4            # cart travel range, in cart widths
        self.pole_length = 100
        self.pole_thickness = 10
        self.cart_speed = 0.1            # cart displacement per action
        self.pole_angle = 0.1            # radians; small initial tilt
        self.pole_angular_velocity = 0
        self.max_pole_angle = 15 * math.pi / 180  # episode ends beyond +/-15 degrees

        self.action_space = spaces.Discrete(2)  # 0 = left, 1 = right
        # BUG FIX: the state has 2 components ([cart_x, pole_angle]), and the
        # DQN in this file uses input_size = 2 — the original shape=(4,) was
        # inconsistent with both.
        self.observation_space = spaces.Box(
            low=-np.inf, high=np.inf, shape=(2,), dtype=np.float32
        )

        self.cart_x = self.screen_width // 2

    def reset(self):
        """Re-center the cart, restore the initial tilt, and return the state."""
        self.cart_x = self.screen_width // 2
        self.pole_angle = 0.1
        self.pole_angular_velocity = 0
        return [self.cart_x, self.pole_angle]

    def step(self, action):
        """Apply ``action`` and return ``(state, reward, done)``.

        Reward is 1 for every non-terminal step, 0 on the terminating step.
        """
        if action == 0:    # push left (slightly raises the pole angle)
            self.cart_x -= self.cart_speed
            self.pole_angle += 0.0005
        elif action == 1:  # push right (slightly lowers the pole angle)
            self.cart_x += self.cart_speed
            self.pole_angle -= 0.0005
        # Terminal when the pole tips past the limit or the cart leaves its
        # allowed band around the screen center.
        center = self.screen_width // 2
        done = (abs(self.pole_angle) > self.max_pole_angle or
                abs(self.cart_x - center) > self.cart_range * self.cart_width)
        reward = 1 if not done else 0
        return [self.cart_x, self.pole_angle], reward, done

    def run(self):
        """Advance the simplified pendulum physics by one tick."""
        self.pole_angle += self.pole_angular_velocity
        # NOTE(review): this OVERWRITES the angular velocity each tick instead
        # of accumulating acceleration (velocity += ...). Kept as-is since the
        # trained model depends on these exact dynamics — confirm intent.
        self.pole_angular_velocity = 0.000398 * math.sin(self.pole_angle)
# Deep Q-network definition
class DQN(nn.Module):
    """Fully connected Q-network: input -> 128 -> 64 -> output Q-values.

    Attribute names (fc1/fc2/fc3) are preserved so previously saved
    state_dicts keep loading.
    """

    def __init__(self, input_size, output_size):
        super(DQN, self).__init__()
        self.fc1 = nn.Linear(input_size, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, output_size)

    def forward(self, x):
        """Return one Q-value per action for the state tensor ``x``."""
        hidden = torch.relu(self.fc1(x))
        hidden = torch.relu(self.fc2(hidden))
        return self.fc3(hidden)

# ---- Initialize environment, model, and optimizer ----
env = InvertedPendulumEnv()
input_size = 2    # state: [cart_x, pole_angle]
output_size = 2   # actions: left / right
# BUG FIX: pick the device once and move the model once, instead of calling
# .to(device) inside the episode loop on every episode.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = DQN(input_size, output_size).to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)

# ---- DQN training hyperparameters ----
num_episodes = 100
gamma = 0.99      # discount factor
epsilon = 0.1     # epsilon-greedy exploration rate

for episode in range(num_episodes):
    state = torch.tensor(env.reset(), dtype=torch.float32, device=device)
    total_reward = 0
    for _ in range(20000):  # cap episode length
        # Epsilon-greedy action selection; no gradients needed for acting.
        if np.random.rand() < epsilon:
            action = env.action_space.sample()  # explore: random action
        else:
            with torch.no_grad():
                action = model(state).argmax().item()  # exploit: greedy action

        env.run()  # advance pendulum physics before applying the action
        next_obs, reward, done = env.step(action)
        # BUG FIX: next_state must be created on the same device as the model;
        # the original left it on CPU and crashed whenever CUDA was available.
        next_state = torch.tensor(next_obs, dtype=torch.float32, device=device)

        q_values = model(state)
        # BUG FIX: the TD target must not backpropagate through the network
        # (no_grad) and the bootstrap term must vanish at terminal states
        # ((1 - done): True -> 0, False -> 1).
        with torch.no_grad():
            target = reward + gamma * model(next_state).max() * (1 - done)

        loss = nn.MSELoss()(q_values[action], target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_reward += reward
        state = next_state
        if done:
            break
    print(f"Episode {episode + 1}, Total Reward: {total_reward}")

torch.save(model.state_dict(), 'rlmodel(2).pth')
env.close()
