import torch
import torch.nn as nn
import torch.optim as optim
import random
import matplotlib.pyplot as plt


# Custom toy environment: a scalar position the agent pushes toward a goal.
class SimpleEnv:
    """1-D environment: each step moves the state by the action (-1, 0, or 1)."""

    def __init__(self):
        self.state = 0.0   # current position
        self.goal = 10.0   # target position
        self.done = False  # episode-finished flag

    def reset(self):
        """Restart the episode and return the initial state."""
        self.state, self.done = 0.0, False
        return self.state

    def step(self, action):
        """Apply an action and return (state, reward, done)."""
        if self.done:
            # Episode already over: state is frozen and no reward is given.
            return self.state, 0, self.done

        self.state += action
        distance = abs(self.state - self.goal)

        if distance < 0.1:
            # Close enough to the goal: terminate with a bonus reward.
            self.done = True
            return self.state, 10, self.done

        # Otherwise the reward is the negative distance to the goal.
        return self.state, -distance, self.done


# Q-network definition
class QNetwork(nn.Module):
    """Two-layer MLP mapping a state vector to one Q-value per action."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # Same architecture as a pair of explicit Linear layers with a
        # ReLU in between: input_dim -> 24 -> output_dim.
        self.layers = nn.Sequential(
            nn.Linear(input_dim, 24),
            nn.ReLU(),
            nn.Linear(24, output_dim),
        )

    def forward(self, x):
        return self.layers(x)


# DQN agent
class DQN:
    """Minimal DQN agent for SimpleEnv (no replay buffer, no target network).

    The Q-network maps a 1-D state to three Q-values, one per action in
    ``ACTIONS``. Updates are single-transition TD(0) steps.
    """

    # Shared index <-> action translation: index 0 -> -1, 1 -> 0, 2 -> 1.
    ACTIONS = [-1, 0, 1]

    def __init__(self, env, gamma=0.99, epsilon=0.1, batch_size=32, learning_rate=1e-3):
        self.env = env
        self.gamma = gamma          # discount factor
        self.epsilon = epsilon      # exploration probability
        # NOTE(review): batch_size is currently unused — there is no replay
        # buffer; every update consumes exactly one transition.
        self.batch_size = batch_size
        self.learning_rate = learning_rate

        self.input_dim = 1   # the environment state is a single scalar
        self.output_dim = 3  # action space size: -1, 0, 1

        self.q_network = QNetwork(self.input_dim, self.output_dim)
        self.optimizer = optim.Adam(self.q_network.parameters(), lr=self.learning_rate)
        self.criterion = nn.MSELoss()

    def select_action(self, state):
        """Epsilon-greedy action selection; returns -1, 0, or 1."""
        if random.random() < self.epsilon:
            return random.choice(self.ACTIONS)  # explore: uniform random action
        state = torch.tensor(state, dtype=torch.float32).unsqueeze(0)
        with torch.no_grad():
            q_values = self.q_network(state)
        # Greedy: map the argmax index (0, 1, 2) back to the action (-1, 0, 1).
        action_idx = torch.argmax(q_values, dim=1).item()
        return self.ACTIONS[action_idx]

    def update(self, state, action, reward, next_state, done):
        """Perform one TD(0) gradient step from a single transition."""
        state = torch.tensor(state, dtype=torch.float32).unsqueeze(0)
        next_state = torch.tensor(next_state, dtype=torch.float32).unsqueeze(0)
        # Convert the action value (-1, 0, 1) to its network output index (0, 1, 2).
        action_idx = self.ACTIONS.index(action)
        action = torch.tensor(action_idx, dtype=torch.long).unsqueeze(0)
        reward = torch.tensor(reward, dtype=torch.float32).unsqueeze(0)

        # Float mask (0.0 or 1.0) so the bootstrap term vanishes on terminal steps.
        done = torch.tensor(done, dtype=torch.float32).unsqueeze(0)

        # Target Q-value: r + gamma * max_a' Q(s', a'), no gradient through it.
        with torch.no_grad():
            next_q_values = self.q_network(next_state)
            next_q_value = next_q_values.max(1)[0]
            target_q_value = reward + self.gamma * next_q_value * (1 - done)

        # Current Q(s, a) for the action that was actually taken.
        q_values = self.q_network(state)
        action_q_values = q_values.gather(1, action.unsqueeze(1)).squeeze(1)

        # MSE loss between current and target Q, then one optimizer step.
        loss = self.criterion(action_q_values, target_q_value)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def train(self, num_episodes=200, max_steps_per_episode=500):
        """Train the agent and plot per-episode rewards.

        max_steps_per_episode caps each episode so a policy that wanders
        away from the goal cannot hang training in an infinite loop
        (the original while-loop had no such bound).
        """
        rewards = []
        best_reward = -float('inf')  # best total reward seen so far
        best_episode = 0

        for episode in range(num_episodes):
            state = self.env.reset()  # fresh episode, initial state
            total_reward = 0
            done = False
            steps = 0
            while not done and steps < max_steps_per_episode:
                action = self.select_action([state])
                next_state, reward, done = self.env.step(action)
                total_reward += reward

                # Learn from this single transition.
                self.update([state], action, reward, [next_state], done)

                state = next_state
                steps += 1

            rewards.append(total_reward)
            # Track the best episode for the plot annotation below.
            if total_reward > best_reward:
                best_reward = total_reward
                best_episode = episode

            print(f"Episode {episode}, Total Reward: {total_reward}")

        # Report the best result.
        print(f"Best Reward: {best_reward} at Episode {best_episode}")

        # Plot the reward curve.
        plt.plot(rewards)
        plt.title('Total Rewards per Episode')
        plt.xlabel('Episode')
        plt.ylabel('Total Reward')

        # Mark the best episode on the curve.
        plt.scatter(best_episode, best_reward, color='red', label=f"Best Reward at Episode {best_episode}")
        plt.legend()
        plt.show()


# Script entry point: guard so importing this module does not start training.
if __name__ == "__main__":
    # Initialize the environment and the DQN agent.
    env = SimpleEnv()
    dqn = DQN(env)

    # Train the agent.
    dqn.train()
