import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
from collections import deque
import gym
import matplotlib.pyplot as plt


# A simple multilayer-perceptron Q-network.
class QNetwork(nn.Module):
    """Three-layer MLP mapping a state vector to one Q-value per action.

    Architecture: input_dim -> 128 -> 256 -> output_dim, ReLU between
    the hidden layers, linear output (raw Q-values).
    """

    def __init__(self, input_dim, output_dim):
        super(QNetwork, self).__init__()
        self.fc1 = nn.Linear(input_dim, 128)
        self.fc2 = nn.Linear(128, 256)
        self.fc3 = nn.Linear(256, output_dim)

    def forward(self, x):
        # Hidden layers share the same activation; the head stays linear.
        for hidden in (self.fc1, self.fc2):
            x = torch.relu(hidden(x))
        return self.fc3(x)


# Proportional prioritized experience replay buffer.
class PrioritizedReplayBuffer:
    """Proportional prioritized experience replay (Schaul et al., 2016).

    Transitions are sampled with probability P(i) = p_i^alpha / sum_k p_k^alpha
    and corrected with importance-sampling weights w_i = (N * P(i))^(-beta),
    normalized by the maximum possible weight so that every w_i <= 1.

    Fixes over the original version:
    - IS weights are now computed from the *actual* sampling distribution
      (p^alpha, not raw p), so they match `_sample_proportional`.
    - Priority powers are computed once, vectorized, instead of re-summing
      the whole deque inside the per-sample loop.
    - Priorities are floored at a small positive value so no transition can
      end up with zero sampling probability (which would also make the
      normalizing max-weight infinite).
    """

    # Floor keeping every priority strictly positive.
    MIN_PRIORITY = 1e-6

    def __init__(self, capacity, alpha=0.6):
        self.capacity = capacity
        self.alpha = alpha  # how strongly priorities skew sampling (0 = uniform)
        self.buffer = deque(maxlen=capacity)
        self.priorities = deque(maxlen=capacity)
        self.max_priority = 1.0  # new transitions get the current max priority

    def add(self, state, action, reward, next_state, done):
        """Store a transition with maximal priority so it is sampled soon."""
        self.buffer.append((state, action, reward, next_state, done))
        self.priorities.append(self.max_priority)

    def _probabilities(self):
        """Return the normalized sampling distribution p^alpha / sum(p^alpha)."""
        scaled = np.asarray(self.priorities, dtype=np.float64) ** self.alpha
        return scaled / scaled.sum()

    def _sample_proportional(self, batch_size):
        """Draw `batch_size` indices proportionally to priority^alpha.

        Raises:
            ValueError: if the buffer holds fewer than `batch_size` transitions.
        """
        if len(self.buffer) < batch_size:
            raise ValueError("Buffer does not have enough samples for sampling.")
        return np.random.choice(len(self.buffer), size=batch_size, p=self._probabilities())

    def sample(self, batch_size, beta=0.4):
        """Sample a prioritized batch.

        Returns:
            (states, actions, rewards, next_states, dones, indices, weights)
            as numpy arrays; `weights` are the normalized importance-sampling
            weights (all in (0, 1]).
        """
        indices = self._sample_proportional(batch_size)
        probs = self._probabilities()
        n = len(self.buffer)

        # w_i = (N * P(i))^(-beta), normalized by the largest possible weight
        # (the one belonging to the minimum-probability transition).
        weights = (n * probs[indices]) ** (-beta)
        weights = weights / ((n * probs.min()) ** (-beta))

        states, actions, rewards, next_states, dones = map(
            np.array, zip(*(self.buffer[i] for i in indices)))

        return states, actions, rewards, next_states, dones, indices, weights

    def update_priorities(self, indices, priorities):
        """Assign new priorities (typically |TD error|) to sampled transitions."""
        for idx, priority in zip(indices, priorities):
            priority = max(float(priority), self.MIN_PRIORITY)  # never zero
            self.priorities[idx] = priority
            self.max_priority = max(self.max_priority, priority)

    def __len__(self):
        return len(self.buffer)


# The reinforcement-learning agent.
class Agent:
    """Double-DQN agent with prioritized replay and epsilon-greedy exploration.

    The online network selects greedy next-state actions and a periodically
    synchronized target network evaluates them (Double DQN). Sampled
    transitions are re-weighted with the importance-sampling weights
    returned by the replay buffer.
    """

    def __init__(self, input_dim, output_dim, lr=0.0005, gamma=0.99, epsilon=1.0, epsilon_decay=0.999,
                 epsilon_min=0.005,
                 buffer_capacity=100000, batch_size=64, target_update_freq=20, noise_std=0.1):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.lr = lr
        self.gamma = gamma                    # discount factor
        self.epsilon = epsilon                # exploration rate, decayed per learn() call
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = epsilon_min
        self.buffer = PrioritizedReplayBuffer(buffer_capacity)
        self.batch_size = batch_size
        self.target_update_freq = target_update_freq  # learn() calls between target syncs
        self.train_step = 0
        self.noise_std = noise_std

        # Resolve the device here instead of relying on a module-level
        # `device` global that is defined after this class in the file.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.q_network = QNetwork(input_dim, output_dim).to(self.device)
        self.target_network = QNetwork(input_dim, output_dim).to(self.device)
        self.target_network.load_state_dict(self.q_network.state_dict())
        self.optimizer = optim.Adam(self.q_network.parameters(), lr=lr)
        # reduction='none' keeps per-sample losses so the importance-sampling
        # weights can actually be applied per sample. (With the default
        # 'mean' reduction the loss was already a scalar and the weights
        # collapsed into a single constant scale factor.)
        self.criterion = nn.MSELoss(reduction='none')

    def act(self, state):
        """Return an action index for `state` (epsilon-greedy over Q-values)."""
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.output_dim)
        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
        with torch.no_grad():  # inference only, no graph needed
            q_values = self.q_network(state)
        action = torch.argmax(q_values, dim=1).item()

        # NOTE(review): adding Gaussian noise to a *discrete* action index is
        # unusual — it occasionally flips the greedy action to a neighbouring
        # index. Kept as-is to preserve the original exploration behaviour;
        # confirm this is intended rather than leftover from a continuous
        # action-space version.
        noise = np.random.normal(0, self.noise_std)
        action = int(np.clip(action + noise, 0, self.output_dim - 1))

        return action

    def step(self, state, action, reward, next_state, done):
        """Record one transition and learn once the buffer holds a full batch."""
        self.buffer.add(state, action, reward, next_state, done)
        if len(self.buffer) >= self.batch_size:
            self.learn()

    def learn(self):
        """Run one gradient step on a prioritized batch using Double-DQN targets."""
        states, actions, rewards, next_states, dones, indices, weights = self.buffer.sample(self.batch_size)

        states = torch.FloatTensor(states).to(self.device)
        actions = torch.LongTensor(actions).unsqueeze(1).to(self.device)
        rewards = torch.FloatTensor(rewards).unsqueeze(1).to(self.device)
        next_states = torch.FloatTensor(next_states).to(self.device)
        dones = torch.FloatTensor(dones).unsqueeze(1).to(self.device)
        weights = torch.FloatTensor(weights).unsqueeze(1).to(self.device)

        q_values = self.q_network(states).gather(1, actions)

        # Double DQN: online net picks the argmax action, target net evaluates
        # it. no_grad() keeps gradients out of the (never-optimized) target path.
        with torch.no_grad():
            next_actions = self.q_network(next_states).argmax(1, keepdim=True)
            next_q_values = self.target_network(next_states).gather(1, next_actions)
            target_q_values = rewards + (1 - dones) * self.gamma * next_q_values

        # Per-sample squared errors, weighted by the IS weights, then averaged.
        loss = (self.criterion(q_values, target_q_values) * weights).mean()
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # New priorities = |TD error| (+ epsilon so none become exactly zero).
        td_errors = (q_values - target_q_values).squeeze(1).detach().cpu().numpy()
        self.buffer.update_priorities(indices, np.abs(td_errors) + 1e-6)

        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

        self.train_step += 1
        if self.train_step % self.target_update_freq == 0:
            self.update_target_network()

    def update_target_network(self):
        """Hard-copy the online-network weights into the target network."""
        self.target_network.load_state_dict(self.q_network.state_dict())


# Pick the compute device once at import time: CUDA if a GPU is available,
# otherwise fall back to the CPU.
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")


# Structure of the training loop below:
# 1. env   — supplies states and rewards
# 2. Agent — chooses actions and learns from experience


# Example usage: train the agent on MountainCar-v0.
if __name__ == "__main__":
    # Create the MountainCar environment (discrete actions; the original
    # comment said "inverted pendulum", which did not match the env id).
    env = gym.make('MountainCar-v0')
    input_dim = env.observation_space.shape[0]
    output_dim = env.action_space.n
    agent = Agent(input_dim, output_dim)

    num_episodes = 2000
    episode_rewards = []  # total reward per episode, for the final plot
    for episode in range(num_episodes):
        # Gymnasium-style API: reset() returns (observation, info).
        state, _ = env.reset()
        done = False
        total_reward = 0
        while not done:
            action = agent.act(state)
            # step() returns the 5-tuple (obs, reward, terminated, truncated, info).
            next_state, reward, terminated, truncated, _ = env.step(action)
            done = terminated or truncated
            agent.step(state, action, reward, next_state, done)
            state = next_state
            total_reward += reward
        episode_rewards.append(total_reward)
        print(f"Episode {episode + 1}: Total Reward = {total_reward}")

    env.close()

    # Plot per-episode total reward. (Title fixed: it previously referenced
    # CartPole-v1 even though the environment is MountainCar-v0.)
    plt.plot(range(1, num_episodes + 1), episode_rewards)
    plt.xlabel('Episode')
    plt.ylabel('Total Reward')
    plt.title('Training Performance of Double DQN on MountainCar-v0')
    plt.show()

