import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
import matplotlib.pyplot as plt
import seaborn as sns


# Environment: a simple deterministic grid world.
class GridWorldEnv:
    """Grid world where the agent walks from the top-left corner to the
    bottom-right goal. Every non-terminal step yields reward -1; reaching
    the goal yields reward 0 and ends the episode.
    """

    def __init__(self, grid_size=5):
        self.grid_size = grid_size
        self.state = (0, 0)  # start at the top-left corner
        # Goal is the bottom-right corner. This was hard-coded to (4, 4),
        # which silently broke any grid_size other than 5; derive it from
        # grid_size instead (identical result for the default size).
        self.goal = (grid_size - 1, grid_size - 1)

    def reset(self):
        """Put the agent back at the start cell and return that state."""
        self.state = (0, 0)
        return self.state

    def step(self, action):
        """Apply one action and advance the environment.

        action: 0=up, 1=down, 2=left, 3=right; moves are clipped at the
        grid border (an off-grid move leaves the agent in place).
        Returns (state, reward, done).
        """
        x, y = self.state
        if action == 0:  # up
            x = max(0, x - 1)
        elif action == 1:  # down
            x = min(self.grid_size - 1, x + 1)
        elif action == 2:  # left
            y = max(0, y - 1)
        elif action == 3:  # right
            y = min(self.grid_size - 1, y + 1)

        self.state = (x, y)

        if self.state == self.goal:
            return self.state, 0, True  # reached the goal: reward 0, episode over
        else:
            return self.state, -1, False  # ordinary step: reward -1, keep going


# Q-value network
class QNetwork(nn.Module):
    """Small MLP that maps a state vector to one Q-value per action."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        hidden = 64
        # Keep the layers inside one Sequential named `fc` so the
        # state_dict layout stays stable for checkpoints.
        self.fc = nn.Sequential(
            nn.Linear(state_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, action_dim),
        )

    def forward(self, state):
        """Return the Q-values for `state` (last dimension = state_dim)."""
        return self.fc(state)


# Q-learning agent
class QLearningAgent:
    """Epsilon-greedy agent that fits a Q-network with one-step TD targets.

    epsilon: exploration probability, gamma: discount factor,
    alpha: Adam learning rate.
    """

    def __init__(self, state_dim, action_dim, epsilon=0.1, gamma=0.99, alpha=0.001):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.epsilon = epsilon
        self.gamma = gamma
        self.alpha = alpha
        self.q_network = QNetwork(state_dim, action_dim)
        self.optimizer = optim.Adam(self.q_network.parameters(), lr=self.alpha)

    def select_action(self, state):
        """Pick an action with the epsilon-greedy policy."""
        if random.uniform(0, 1) < self.epsilon:
            return random.choice(range(self.action_dim))  # explore: uniform random
        else:
            with torch.no_grad():
                state_tensor = torch.tensor(state, dtype=torch.float32)
                q_values = self.q_network(state_tensor)
                return torch.argmax(q_values).item()  # exploit: greedy action

    def update(self, state, action, reward, next_state, done):
        """One semi-gradient TD(0) update on the transition (s, a, r, s', done)."""
        state_tensor = torch.tensor(state, dtype=torch.float32)
        next_state_tensor = torch.tensor(next_state, dtype=torch.float32)

        # Current Q estimate for the taken action (keeps gradients).
        q_values = self.q_network(state_tensor)
        q_value = q_values[action]

        # BUG FIX: the bootstrap target must be treated as a constant.
        # Previously max Q(s') carried gradients, so backprop also moved
        # the target (not a valid semi-gradient Q-learning update).
        with torch.no_grad():
            if done:
                target = torch.tensor(float(reward))  # terminal state: no bootstrap
            else:
                next_q_values = self.q_network(next_state_tensor)
                target = reward + self.gamma * torch.max(next_q_values)

        # Squared TD error.
        loss = (q_value - target).pow(2).mean()

        # Gradient step on the Q-network.
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()


# Training loop
def train(agent, env, episodes=1000):
    """Train `agent` on `env` for `episodes` episodes.

    Learns online (one update per environment step) and tracks the
    best-scoring episode. Returns
    (best_reward, best_episode, per-episode rewards, best path).
    """
    rewards = []
    best_reward = float('-inf')
    best_episode = 0
    best_path = []

    for episode in range(episodes):
        state = env.reset()
        path = [state]  # states visited this episode, in order
        total_reward = 0
        done = False

        while not done:
            action = agent.select_action(state)
            next_state, reward, done = env.step(action)
            agent.update(state, action, reward, next_state, done)
            total_reward += reward
            state = next_state
            path.append(state)

        rewards.append(total_reward)

        # Remember the episode (and its path) with the highest return so far.
        if total_reward > best_reward:
            best_reward, best_episode, best_path = total_reward, episode, path

        # Periodic progress report.
        if episode % 100 == 0:
            print(f"Episode {episode}: Total Reward: {total_reward}")

    return best_reward, best_episode, rewards, best_path


# Visualize the training progress
def plot_rewards(rewards):
    """Line-plot the total reward collected in each training episode."""
    plt.plot(rewards)
    plt.title('Q-Learning Training Rewards')
    plt.xlabel('Episodes')
    plt.ylabel('Total Reward')
    plt.show()


# Draw the best path
def plot_path(best_path, grid_size=5):
    """Render the cells visited by `best_path` as a grid heat map."""
    grid = np.zeros((grid_size, grid_size))
    for row, col in best_path:
        grid[row, col] = 1  # mark visited cells with 1

    plt.imshow(grid, cmap='Blues', interpolation='nearest')
    plt.colorbar(label='Path')

    # Mark the goal (bottom-right corner) with a green dot.
    plt.plot(grid_size - 1, grid_size - 1, 'go', markersize=10)
    plt.title('Best Path')
    plt.show()


# Entry point: train an agent on the grid world and visualize the results.
def main():
    # Instantiate environment and agent: state is (x, y), four actions.
    env = GridWorldEnv()
    agent = QLearningAgent(state_dim=2, action_dim=4)

    # Train the agent.
    best_reward, best_episode, rewards, best_path = train(agent, env)

    # Report the best result.
    print(f"Best Reward: {best_reward}, at Episode: {best_episode}")

    # Visualize the reward curve and the best path.
    plot_rewards(rewards)
    plot_path(best_path)


# Guard the side effects so importing this module no longer kicks off a
# full training run; `python <file>` behaves exactly as before.
if __name__ == "__main__":
    main()
