import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
from collections import deque
import gym
import matplotlib.pyplot as plt
import imageio

# Select the compute device: CUDA if a GPU is available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# A small fully connected network mapping a state vector to per-action Q-values.
class QNetwork(nn.Module):
    def __init__(self, input_dim, output_dim):
        """Build a 3-layer MLP: input_dim -> 128 -> 256 -> output_dim."""
        super(QNetwork, self).__init__()
        self.fc1 = nn.Linear(input_dim, 128)
        self.fc2 = nn.Linear(128, 256)
        self.fc3 = nn.Linear(256, output_dim)

    def forward(self, x):
        """Return Q-value estimates for each action, given a batch of states."""
        out = self.fc1(x).relu()
        out = self.fc2(out).relu()
        return self.fc3(out)


# Fixed-capacity experience replay; once full, the oldest transitions are evicted.
class ReplayBuffer:
    def __init__(self, capacity):
        # deque(maxlen=...) gives O(1) append with automatic eviction.
        self.buffer = deque(maxlen=capacity)

    def add(self, state, action, reward, next_state, done):
        """Store one (state, action, reward, next_state, done) transition."""
        transition = (state, action, reward, next_state, done)
        self.buffer.append(transition)

    def sample(self, batch_size):
        """Draw a uniform random mini-batch and return it as five numpy arrays."""
        picked = random.sample(self.buffer, batch_size)
        columns = zip(*picked)
        return tuple(np.array(column) for column in columns)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.buffer)


# Double-DQN agent: epsilon-greedy exploration, experience replay, and a
# target network that is hard-updated every `target_update_freq` learn steps.
class Agent:
    def __init__(self,
                 input_dim,
                 output_dim,
                 lr=0.001,
                 gamma=0.95,
                 epsilon=1.0,
                 epsilon_decay=0.99,
                 epsilon_min=0.001,
                 buffer_capacity=100000,
                 batch_size=64,
                 target_update_freq=20,
                 tau=0.005
                 ):
        """Create the online/target networks, replay buffer, and optimizer.

        Args:
            input_dim: size of the observation vector.
            output_dim: number of discrete actions.
            lr: Adam learning rate.
            gamma: discount factor.
            epsilon / epsilon_decay / epsilon_min: epsilon-greedy schedule
                (decayed multiplicatively once per learn step).
            buffer_capacity: replay buffer size.
            batch_size: mini-batch size for learning.
            target_update_freq: learn steps between hard target updates.
            tau: interpolation factor for the optional soft target update.
        """
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.lr = lr
        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = epsilon_min
        self.buffer = ReplayBuffer(buffer_capacity)
        self.batch_size = batch_size
        self.target_update_freq = target_update_freq
        self.train_step = 0
        self.tau = tau

        self.q_network = QNetwork(input_dim, output_dim).to(device)
        self.target_network = QNetwork(input_dim, output_dim).to(device)
        # Start the target network as an exact copy of the online network.
        self.target_network.load_state_dict(self.q_network.state_dict())
        self.optimizer = optim.Adam(self.q_network.parameters(), lr=lr)
        self.criterion = nn.MSELoss()

    def act(self, state):
        """Return an action index chosen epsilon-greedily for `state`."""
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.output_dim)
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        # Inference only: no_grad avoids building an autograd graph per action.
        with torch.no_grad():
            q_values = self.q_network(state)
        return torch.argmax(q_values, dim=1).item()

    def step(self, state, action, reward, next_state, done):
        """Record a transition and learn once the buffer holds a full batch."""
        self.buffer.add(state, action, reward, next_state, done)

        # Learn on every environment step; the target network itself is
        # only refreshed every `target_update_freq` learn steps.
        if len(self.buffer) >= self.batch_size:
            self.learn()

    def learn(self):
        """Sample a mini-batch and take one Double-DQN gradient step."""
        states, actions, rewards, next_states, dones = self.buffer.sample(self.batch_size)
        states = torch.FloatTensor(states).to(device)
        actions = torch.LongTensor(actions).unsqueeze(1).to(device)
        rewards = torch.FloatTensor(rewards).unsqueeze(1).to(device)
        next_states = torch.FloatTensor(next_states).to(device)
        dones = torch.FloatTensor(dones).unsqueeze(1).to(device)

        self.q_network.eval()
        with torch.no_grad():
            # Double DQN: the online network picks the best next action...
            next_q_values_eval = self.q_network(next_states)
            best_actions = torch.argmax(next_q_values_eval, dim=1, keepdim=True)
            # ...and the target network evaluates that action's value.
            next_q_values_target = self.target_network(next_states)
            max_next_q_values = next_q_values_target.gather(1, best_actions)
            # (1 - dones) zeroes the bootstrap term at terminal states.
            target_q_values = rewards + (1 - dones) * self.gamma * max_next_q_values

        self.q_network.train()
        q_values = self.q_network(states).gather(1, actions)
        loss = self.criterion(q_values, target_q_values)

        self.optimizer.zero_grad()
        loss.backward()
        # (optional) gradient clipping would go here, between backward() and
        # step(): nn.utils.clip_grad_norm_(self.q_network.parameters(), 1.0)
        self.optimizer.step()

        # Decay the exploration rate once per learn step.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

        self.train_step += 1
        # Hard update of the target network at a fixed frequency
        # (soft_update_target_network is the Polyak alternative).
        if self.train_step % self.target_update_freq == 0:
            self.update_target_network()

    def soft_update_target_network(self):
        """Polyak-average the online weights into the target network."""
        for target_param, local_param in zip(self.target_network.parameters(), self.q_network.parameters()):
            target_param.data.copy_(self.tau * local_param.data + (1 - self.tau) * target_param.data)

    def update_target_network(self):
        """Hard-copy the online network weights into the target network."""
        self.target_network.load_state_dict(self.q_network.state_dict())


# Example usage: train on CartPole, plot the learning curve, then record a video.
if __name__ == "__main__":
    env_name = 'CartPole-v1'
    env = gym.make(env_name)
    input_dim = env.observation_space.shape[0]
    output_dim = env.action_space.n
    agent = Agent(input_dim, output_dim)

    num_episodes = 200
    episode_rewards = []
    for episode in range(num_episodes):
        state, _ = env.reset()
        done = False
        total_reward = 0
        while not done:
            action = agent.act(state)
            next_state, reward, terminated, truncated, _ = env.step(action)
            # End the episode on truncation too; otherwise a well-trained
            # agent would keep stepping past the env's time limit forever.
            done = terminated or truncated
            # Store only `terminated` as the terminal flag so the bootstrap
            # target is not zeroed at the artificial time-limit cutoff.
            agent.step(state, action, reward, next_state, terminated)
            state = next_state
            total_reward += reward
        episode_rewards.append(total_reward)
        print(f"Episode {episode + 1}: Total Reward = {total_reward}")

    env.close()

    # Plot the per-episode total reward curve.
    plt.plot(range(1, num_episodes + 1), episode_rewards)
    plt.xlabel('Episode')
    plt.ylabel('Total Reward')
    plt.title('Training Performance of DQN on CartPole - v1')
    plt.show()

    # Re-create the environment with frame rendering for the video.
    env = gym.make(env_name, render_mode='rgb_array')
    # Greedy rollout: record the learned policy, not exploration noise.
    agent.epsilon = 0.0
    frames = []
    state, _ = env.reset()
    max_steps = 10000  # safety cap so the recording loop always terminates
    for _ in range(max_steps):
        action = agent.act(state)
        next_state, reward, terminated, truncated, _ = env.step(action)
        if terminated or truncated:
            break
        frame = env.render()
        if frame is not None:  # render() may return None on some backends
            frames.append(frame)
        state = next_state

    env.close()

    # Save the collected frames as a video.
    epoch = num_episodes
    imageio.mimsave(f"./dqn_{env_name}_{epoch}_video.mp4", frames, fps=60)
    print(f"视频已保存为 ./dqn_{env_name}_{epoch}_video.mp4")
