import random
import gym
import numpy as np
import collections
import torch
import torch.nn.functional as F
import torch.nn as nn
import rl_utils
import huawei_cloud_upgrade as hcu
from collections import deque
import graph_neural_network as GNN
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, global_mean_pool
from graph_neural_network import GNNEncoder
import math


class SimpleAttentionQNetworkGNN(torch.nn.Module):
    """Q-network with element-wise feature attention, a drop-in replacement
    for QNetworkGNN (same constructor arguments, same forward() contract).

    A learned sigmoid gate re-weights the concatenated
    [graph embedding, action features] input before the MLP head.
    """

    def __init__(self, graph_hid_dim, q_hid_dim, agent_feat_dim):
        super().__init__()
        in_dim = graph_hid_dim + agent_feat_dim

        # Gating network: maps the concatenated input to per-feature
        # attention weights in (0, 1).
        self.feature_attention = nn.Sequential(
            nn.Linear(in_dim, q_hid_dim),
            nn.ReLU(),
            nn.Linear(q_hid_dim, in_dim),
            nn.Sigmoid(),  # weights in (0, 1)
        )

        # Plain two-layer MLP head producing one scalar Q value per row.
        self.fc1 = torch.nn.Linear(in_dim, q_hid_dim)
        self.fc2 = torch.nn.Linear(q_hid_dim, q_hid_dim)
        self.out = torch.nn.Linear(q_hid_dim, 1)

        # Light dropout between hidden layers to reduce overfitting.
        self.dropout = nn.Dropout(0.1)

    def forward(self, graph_emb, action_feats):
        """Score each action row against the graph embedding.

        graph_emb:
            - inference: tensor of shape (graph_hid_dim,)
            - batched update: tensor of shape (B, graph_hid_dim)
        action_feats:
            - inference: tensor of shape (K, agent_feat_dim)
            - batched update: tensor of shape (B, agent_feat_dim)
        Returns a 1-D tensor of Q values, shape (K,) or (B,) respectively.
        """
        if graph_emb.dim() == 1:
            # Single-graph inference: broadcast the embedding over all K actions.
            graph_rep = graph_emb.unsqueeze(0).expand(action_feats.size(0), -1)
        else:
            # Batched update: one embedding row per action row already.
            graph_rep = graph_emb

        combined = torch.cat((graph_rep, action_feats), dim=1)  # (N, in_dim)
        # Element-wise attention gate, then re-weight the inputs with it.
        gated = combined * self.feature_attention(combined)

        h = self.dropout(F.relu(self.fc1(gated)))
        h = F.relu(self.fc2(h))
        return self.out(h).view(-1)  # (N,)


class SimpleAttentionGNNDQNAgent:
    """DQN agent whose Q-network applies feature attention on top of a GNN
    state encoding.  Drop-in replacement for GNNDQNAgent (same interface).
    """

    def __init__(self,
                 graph_dim: int,
                 graph_hid_dim: int,
                 q_net_hid_dim: int,
                 agent_feat_dim: int,
                 lr: float = 1e-3,
                 gamma: float = 0.99,
                 eps: float = 0.1,
                 target_update: int = 200,
                 device: str = 'cpu'):
        """
        graph_dim:      node feature dimension of the input graphs
        graph_hid_dim:  GNN embedding dimension
        q_net_hid_dim:  hidden width of the Q-network MLP
        agent_feat_dim: per-action feature dimension
        lr / gamma / eps: Adam learning rate / discount / epsilon-greedy rate
        target_update:  hard target-network sync period (in update() calls)
        """
        self.device = torch.device(device)

        # 1) Graph encoder producing a fixed-size state embedding.
        from graph_neural_network import GNNEncoder
        self.gnn = GNNEncoder(in_dim=graph_dim, hid_dim=graph_hid_dim).to(self.device)

        # 2) Online Q-network with feature attention.
        self.net = SimpleAttentionQNetworkGNN(
            graph_hid_dim=graph_hid_dim,
            q_hid_dim=q_net_hid_dim,
            agent_feat_dim=agent_feat_dim
        ).to(self.device)

        # 3) Target network, hard-synced from the online net.
        self.target_net = SimpleAttentionQNetworkGNN(
            graph_hid_dim=graph_hid_dim,
            q_hid_dim=q_net_hid_dim,
            agent_feat_dim=agent_feat_dim
        ).to(self.device)
        self.target_net.load_state_dict(self.net.state_dict())

        # NOTE(review): only the Q-network parameters are optimized; the GNN
        # encoder keeps its initialization.  Add self.gnn.parameters() here if
        # joint training of the encoder is intended.
        self.opt = torch.optim.Adam(self.net.parameters(), lr=lr)
        self.gamma = gamma
        self.eps = eps
        self.target_update = target_update

        self.buffer = ReplayBuffer(10000)
        self.batch_size = 64
        self.step_cnt = 0

    def select_action(self, graph_data, agent_feats, is_training=True):
        """Epsilon-greedy action index in [0, K) for K candidate actions.

        graph_data: graph input accepted by the GNN encoder
        agent_feats: Tensor[K, agent_feat_dim], one row per candidate action
        """
        K = agent_feats.shape[0]
        if is_training and torch.rand(1).item() < self.eps:
            return torch.randint(0, K, (1,)).item()
        with torch.no_grad():
            graph_emb = self.gnn(graph_data)              # (hid_dim,)
            q_vals = self.net(graph_emb, agent_feats)     # (K,)
        return int(q_vals.argmax().item())

    def store(self, graph_data, agent_feats, action_idx, reward, next_graph_data, next_agent_feats, done):
        """Push one transition into the replay buffer."""
        self.buffer.add(graph_data, agent_feats, action_idx, reward, next_graph_data, next_agent_feats, done)

    def update(self):
        """One DQN update from a sampled minibatch; no-op until the buffer
        holds at least batch_size transitions."""
        if len(self.buffer) < self.batch_size:
            return

        # 1) sample a minibatch
        g_data, a_feats, acts, rews, ng_data, na_feats, dones = \
            self.buffer.sample(self.batch_size)

        # 2) Encode graphs without autograd: the encoder's parameters are not
        #    in the optimizer, so building its gradient graph (as the previous
        #    version did) only wasted memory and compute.
        with torch.no_grad():
            graph_embs = torch.stack([self.gnn(data) for data in g_data], dim=0)
            next_graph_embs = torch.stack([self.gnn(data) for data in ng_data], dim=0)

        # Feature row of the action actually taken in each transition.
        batch_feats = torch.stack([feats[idx] for feats, idx in zip(a_feats, acts)]).to(self.device)  # (B, D)

        batch_rews = torch.tensor(rews, dtype=torch.float, device=self.device)   # (B,)
        batch_dones = torch.tensor(dones, dtype=torch.float, device=self.device) # (B,)

        # 3) current Q for the taken actions
        q_pred = self.net(graph_embs, batch_feats)  # (B,)

        # 4) bootstrapped target: max over next candidate actions (target net)
        next_q_max = []
        with torch.no_grad():
            for emb, nf in zip(next_graph_embs, na_feats):
                # as_tensor avoids a copy when nf is already a tensor
                nf_tensor = torch.as_tensor(nf, dtype=torch.float32, device=self.device)
                next_q_max.append(self.target_net(emb, nf_tensor).max().item())
        next_q_max = torch.tensor(next_q_max, dtype=torch.float, device=self.device)  # (B,)

        # 5) TD target (no bootstrap on terminal transitions)
        q_target = batch_rews + self.gamma * next_q_max * (1 - batch_dones)

        # 6) gradient step on the online network
        loss = F.mse_loss(q_pred, q_target)
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()

        # 7) periodic hard sync of the target network
        self.step_cnt += 1
        if self.step_cnt % self.target_update == 0:
            self.target_net.load_state_dict(self.net.state_dict())


# 保存和加载函数（兼容原版）
def save_attention_agent(agent: SimpleAttentionGNNDQNAgent, path: str):
    """Serialize the attention agent's networks, optimizer state and
    training progress to *path* as a single torch checkpoint."""
    state = {
        'gnn_state_dict': agent.gnn.state_dict(),
        'qnet_state_dict': agent.net.state_dict(),
        'target_qnet_state_dict': agent.target_net.state_dict(),
        'optimizer_state_dict': agent.opt.state_dict(),
        'epsilon': agent.eps,
        'step_count': agent.step_cnt,
    }
    torch.save(state, path)
    print(f"Saved attention agent to {path}")

def load_attention_agent(agent: SimpleAttentionGNNDQNAgent, path: str, map_location=None):
    """Restore networks, optimizer state and training progress saved by
    save_attention_agent into *agent* (in place)."""
    ckpt = torch.load(path, map_location=map_location)
    agent.gnn.load_state_dict(ckpt['gnn_state_dict'])
    agent.net.load_state_dict(ckpt['qnet_state_dict'])
    agent.target_net.load_state_dict(ckpt['target_qnet_state_dict'])
    agent.opt.load_state_dict(ckpt['optimizer_state_dict'])
    # Checkpoints missing progress fields fall back to the agent's current values.
    agent.eps = ckpt.get('epsilon', agent.eps)
    agent.step_cnt = ckpt.get('step_count', agent.step_cnt)
    print(f"Loaded attention agent from {path}")


##########################   DQN + GNN agent code below   ##########################




class ReplayBuffer:
    """Fixed-capacity FIFO store of (graph, action features, action index,
    reward, next graph, next action features, done) transitions."""

    def __init__(self, capacity):
        # deque silently evicts the oldest transition once full
        self.buffer = collections.deque(maxlen=capacity)

    def add(self, graph_data, action_feats, action_idx, reward, next_graph_data, next_action_feats, done):
        """Append one transition to the pool."""
        self.buffer.append(
            (graph_data, action_feats, action_idx, reward,
             next_graph_data, next_action_feats, done)
        )

    def sample(self, batch_size):
        """Uniformly sample batch_size transitions.

        Returns 7 parallel tuples, each of length batch_size.
        """
        batch = random.sample(self.buffer, batch_size)
        return tuple(zip(*batch))

    def __len__(self):
        return len(self.buffer)


class QNetworkGNN(torch.nn.Module):
    """MLP Q-network over the concatenation [graph embedding, action features]."""

    def __init__(self, graph_hid_dim, q_hid_dim, agent_feat_dim):
        super().__init__()
        # Input width is the graph embedding plus the per-action features.
        self.fc1 = torch.nn.Linear(graph_hid_dim + agent_feat_dim, q_hid_dim)
        self.fc2 = torch.nn.Linear(q_hid_dim, q_hid_dim)
        self.out = torch.nn.Linear(q_hid_dim, 1)

    def forward(self, graph_emb, action_feats):
        """Return one Q value per row of action_feats.

        graph_emb:
            - inference (select_action): shape (graph_hid_dim,)
            - batched update: shape (B, graph_hid_dim)
        action_feats:
            - inference: shape (K, agent_feat_dim)
            - batched update: shape (B, agent_feat_dim)
        Returns a 1-D tensor of shape (K,) or (B,) respectively.
        """
        if graph_emb.dim() != 1:
            # Batched: one embedding row per action row already.
            graph_rep = graph_emb
        else:
            # Single-graph inference: repeat the embedding for every action.
            graph_rep = graph_emb.unsqueeze(0).expand(action_feats.size(0), -1)

        h = torch.cat((graph_rep, action_feats), dim=1)  # (N, in_dim)
        h = F.relu(self.fc1(h))
        h = F.relu(self.fc2(h))
        return self.out(h).view(-1)  # (N,)




class GNNDQNAgent:
    """DQN agent that encodes the environment graph with a GNN and scores
    candidate actions by their per-action feature vectors."""

    def __init__(self,
                 graph_dim: int,        # node feature dimension (x.shape[1])
                 graph_hid_dim: int,    # GNN embedding dimension
                 q_net_hid_dim: int,    # QNetworkGNN hidden width
                 agent_feat_dim: int,   # per-action feature dimension
                 lr: float = 1e-3,
                 gamma: float = 0.99,
                 eps: float = 0.1,
                 target_update: int = 200,
                 device: str = 'cpu'):
        self.device = torch.device(device)
        # 1) Graph encoder: (N, graph_dim) node features -> graph embedding.
        self.gnn = GNNEncoder(in_dim=graph_dim, hid_dim=graph_hid_dim).to(self.device)
        # 2) Online Q-network over [graph embedding, action features].
        self.net = QNetworkGNN(
            graph_hid_dim=graph_hid_dim,
            q_hid_dim=q_net_hid_dim,
            agent_feat_dim=agent_feat_dim
        ).to(self.device)
        # 3) Target network, hard-synced from the online net.
        self.target_net = QNetworkGNN(
            graph_hid_dim=graph_hid_dim,
            q_hid_dim=q_net_hid_dim,
            agent_feat_dim=agent_feat_dim
        ).to(self.device)
        self.target_net.load_state_dict(self.net.state_dict())

        # NOTE(review): only the Q-network is optimized; the GNN encoder keeps
        # its random initialization.  Add self.gnn.parameters() here if joint
        # training of the encoder is intended.
        self.opt = torch.optim.Adam(self.net.parameters(), lr=lr)
        self.gamma = gamma
        self.eps = eps
        self.target_update = target_update
        self.buffer = ReplayBuffer(10000)
        self.batch_size = 64
        self.step_cnt = 0

    def select_action(self, graph_data, agent_feats, is_training=True):
        """Epsilon-greedy action index in [0, K).

        graph_data: PyG Data containing x, edge_index, batch
        agent_feats: Tensor[K, agent_feat_dim], one row per candidate action
        """
        K = agent_feats.shape[0]
        if is_training and random.random() < self.eps:
            return random.randrange(K)
        with torch.no_grad():
            graph_emb = self.gnn(graph_data)              # (hid_dim,)
            q_vals = self.net(graph_emb, agent_feats)     # (K,)
        return int(q_vals.argmax().item())

    def store(self, graph_data, agent_feats, action_idx, reward, next_graph_data, next_agent_feats, done):
        """Push one transition into the replay buffer."""
        self.buffer.add(graph_data, agent_feats, action_idx, reward, next_graph_data, next_agent_feats, done)

    def update(self):
        """One DQN update from a sampled minibatch; no-op until the buffer
        holds at least batch_size transitions."""
        if len(self.buffer) < self.batch_size:
            return

        # 1) sample a minibatch
        g_data, a_feats, acts, rews, ng_data, na_feats, dones = \
            self.buffer.sample(self.batch_size)

        # 2) Encode graphs without autograd: the encoder's parameters are not
        #    in the optimizer, so building its gradient graph (as the previous
        #    version did) only wasted memory and compute.
        with torch.no_grad():
            graph_embs = torch.stack([self.gnn(data) for data in g_data], dim=0)
            next_graph_embs = torch.stack([self.gnn(data) for data in ng_data], dim=0)

        # Feature row of the action actually taken in each transition.
        batch_feats = torch.stack([feats[idx] for feats, idx in zip(a_feats, acts)]).to(self.device)  # (B, D)

        batch_rews = torch.tensor(rews, dtype=torch.float, device=self.device)   # (B,)
        batch_dones = torch.tensor(dones, dtype=torch.float, device=self.device) # (B,)

        # 3) current Q for the taken actions
        q_pred = self.net(graph_embs, batch_feats)  # (B,)

        # 4) bootstrapped target: max over next candidate actions (target net)
        next_q_max = []
        with torch.no_grad():
            for emb, nf in zip(next_graph_embs, na_feats):
                # as_tensor avoids a copy when nf is already a tensor
                nf_tensor = torch.as_tensor(nf, dtype=torch.float32, device=self.device)
                next_q_max.append(self.target_net(emb, nf_tensor).max().item())
        next_q_max = torch.tensor(next_q_max, dtype=torch.float, device=self.device)  # (B,)

        # 5) TD target (no bootstrap on terminal transitions)
        q_target = batch_rews + self.gamma * next_q_max * (1 - batch_dones)

        # 6) gradient step on the online network
        loss = F.mse_loss(q_pred, q_target)
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()

        # 7) periodic hard sync of the target network
        self.step_cnt += 1
        if self.step_cnt % self.target_update == 0:
            self.target_net.load_state_dict(self.net.state_dict())




def save_agent(agent: GNNDQNAgent, path: str):
    """Write the agent's network weights, optimizer state and training
    progress to *path* as a single torch checkpoint."""
    checkpoint = {
        # graph encoder weights
        'gnn_state_dict': agent.gnn.state_dict(),
        # online and target Q-network weights
        'qnet_state_dict': agent.net.state_dict(),
        'target_qnet_state_dict': agent.target_net.state_dict(),
        # optimizer state (the agent names this attribute `opt`)
        'optimizer_state_dict': agent.opt.state_dict(),
        # training hyper-parameters / progress
        'epsilon': agent.eps,
        'step_count': agent.step_cnt,
        # To also persist the replay buffer, serialize agent.buffer.buffer here.
    }
    torch.save(checkpoint, path)
    print(f"Saved agent to {path}")

def load_agent(agent: GNNDQNAgent, path: str, map_location=None):
    """Restore a checkpoint produced by save_agent into *agent* in place."""
    ckpt = torch.load(path, map_location=map_location)
    # networks
    agent.gnn.load_state_dict(ckpt['gnn_state_dict'])
    agent.net.load_state_dict(ckpt['qnet_state_dict'])
    agent.target_net.load_state_dict(ckpt['target_qnet_state_dict'])
    # optimizer
    agent.opt.load_state_dict(ckpt['optimizer_state_dict'])
    # progress fields may be absent in older checkpoints; keep current values then
    agent.eps = ckpt.get('epsilon', agent.eps)
    agent.step_cnt = ckpt.get('step_count', agent.step_cnt)
    # To restore a persisted replay buffer as well, rebuild agent.buffer.buffer
    # as a deque with the original maxlen (elements must be picklable).
    print(f"Loaded agent from {path}")


##########################   Original plain-DQN (no GNN) code below   ##########################

class DQNReplayBuffer:
    """Experience replay pool for the plain DQN agent (fixed-capacity FIFO)."""

    def __init__(self, capacity):
        self.buffer = collections.deque(maxlen=capacity)  # oldest entries evicted first

    def add(self, state, action, reward, next_state, done):
        """Store a single (s, a, r, s', done) transition."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Uniformly draw batch_size transitions.

        States and next states come back stacked as numpy arrays; actions,
        rewards and done flags as tuples.
        """
        picked = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*picked)
        return np.array(states), actions, rewards, np.array(next_states), dones

    def size(self):
        """Number of transitions currently stored."""
        return len(self.buffer)
    

class QNetwork(torch.nn.Module):
    """Two-hidden-layer MLP mapping a feature row to a scalar Q value."""

    def __init__(self, feature_dim, hidden_dim):
        super().__init__()
        self.fc1 = torch.nn.Linear(feature_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, hidden_dim)
        self.out = torch.nn.Linear(hidden_dim, 1)

    def forward(self, x):
        """x: (batch, feature_dim) -> Q values of shape (batch,)."""
        h = F.relu(self.fc2(F.relu(self.fc1(x))))
        return self.out(h).view(-1)




class DQNAgent:
    """Plain (non-GNN) DQN agent operating directly on per-action feature rows.

    Each candidate action is described by a feature vector; the Q-network
    scores every row and the agent picks epsilon-greedily among them.
    """

    def __init__(self, feature_dim, hidden_dim, lr=2e-3, gamma=0.1, epsilon=0.5,
                 target_update=100, device=torch.device("cpu")):
        """
        feature_dim:   width of one action-feature row
        hidden_dim:    hidden width of the Q-network
        lr / gamma:    Adam learning rate / discount factor
        epsilon:       exploration rate for epsilon-greedy selection
        target_update: hard target-network sync period (in update() calls)
        """
        self.net = QNetwork(feature_dim, hidden_dim).to(device)
        self.target_net = QNetwork(feature_dim, hidden_dim).to(device)
        # Start the target net from the online net's weights (the GNN agents
        # in this module do the same); previously it kept an unrelated init.
        self.target_net.load_state_dict(self.net.state_dict())
        self.optimizer = torch.optim.Adam(self.net.parameters(), lr=lr)
        self.gamma = gamma
        self.epsilon = epsilon
        self.target_update = target_update
        self.device = device
        self.buffer = deque(maxlen=10000)
        self.batch_size = 64
        self.step_count = 0

    def select_action(self, feats: np.ndarray, is_training=True):
        """
        feats: np.ndarray of shape (K, D), one row per candidate action.
        Returns an index in [0, K).
        """
        K = feats.shape[0]
        # The last row is excluded from greedy scoring whenever K > 1.
        # NOTE(review): the epsilon-random branch below can still return K-1,
        # so exploration and exploitation cover different action sets —
        # confirm this asymmetry is intended.
        feats_ = feats[:-1, :] if K > 1 else feats

        if is_training and random.random() < self.epsilon:
            return random.randrange(K)

        # Greedy path (identical for training and evaluation; previously the
        # same code was duplicated in both branches of `if is_training`).
        with torch.no_grad():
            x = torch.as_tensor(feats_, dtype=torch.float32, device=self.device)  # (K', D)
            q = self.net(x)  # (K',)
        return int(torch.argmax(q).item())

    def store(self, feat, action_idx, reward, next_feat, done):
        """Push one (feature row, action, reward, next candidates, done) transition."""
        self.buffer.append((feat, action_idx, reward, next_feat, done))

    def update(self):
        """One gradient step on a uniformly sampled minibatch; no-op while
        the buffer holds fewer than batch_size transitions."""
        if len(self.buffer) < self.batch_size:
            return
        batch = random.sample(self.buffer, self.batch_size)
        feats, acts, rews, next_feats_batch, dones = zip(*batch)

        # Accept either torch tensors or numpy arrays from the buffer
        # (previously `f.cpu().numpy()` crashed on numpy input even though
        # select_action is documented to take np.ndarray).
        feats = torch.stack(
            [torch.as_tensor(f, dtype=torch.float32) for f in feats]
        ).to(self.device)  # (B, D)
        rews = torch.tensor(rews, dtype=torch.float, device=self.device)   # (B,)
        dones = torch.tensor(dones, dtype=torch.float, device=self.device) # (B,)

        # Q of the stored (state, action) feature rows.
        q_pred = self.net(feats)  # (B,)

        # Bootstrapped target: max_a' Q_target(s', a') over next candidates.
        next_q_max = []
        with torch.no_grad():
            for nf in next_feats_batch:
                nf_tensor = torch.as_tensor(nf, dtype=torch.float32, device=self.device)  # (K, D)
                next_q_max.append(torch.max(self.target_net(nf_tensor)).item())
        next_q_max = torch.tensor(next_q_max, dtype=torch.float32, device=self.device)  # (B,)

        # TD target (no bootstrap on terminal transitions).
        q_target = rews + self.gamma * next_q_max * (1 - dones)

        loss = F.mse_loss(q_pred, q_target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Periodic hard sync of the target network.
        self.step_count += 1
        if self.step_count % self.target_update == 0:
            self.target_net.load_state_dict(self.net.state_dict())



def dqn_save_agent(agent: DQNAgent, path: str):
    """Write the DQN agent's network weights, optimizer state and training
    progress to *path* as a single torch checkpoint."""
    state = {
        'net_state_dict': agent.net.state_dict(),
        'target_state_dict': agent.target_net.state_dict(),
        'optimizer_state_dict': agent.optimizer.state_dict(),
        # training progress, so a resumed run continues where it left off
        'epsilon': agent.epsilon,
        'step_count': agent.step_count,
    }
    torch.save(state, path)
    print(f"Saved agent to {path}")


def dqn_load_agent(agent: DQNAgent, path: str, map_location=None):
    """Restore a checkpoint produced by dqn_save_agent into *agent* in place."""
    ckpt = torch.load(path, map_location=map_location)
    agent.net.load_state_dict(ckpt['net_state_dict'])
    agent.target_net.load_state_dict(ckpt['target_state_dict'])
    agent.optimizer.load_state_dict(ckpt['optimizer_state_dict'])
    # Progress fields may be absent in older checkpoints; keep current values then.
    agent.epsilon = ckpt.get('epsilon', agent.epsilon)
    agent.step_count = ckpt.get('step_count', agent.step_count)
    print(f"Loaded agent from {path}")


