from Graph import Get_graph, Get_Kcore
import numpy as np
from torch_geometric.utils import from_networkx
from torch_geometric.nn import GATConv
from Graph import Get_graph
import copy
import torch.nn as nn
import random
from collections import deque
import torch
import torch.nn.functional as F
import torch.optim as optim
from Propagation_model import WIC_spread, IC_spread


class ReplayBuffer:
    """Fixed-capacity FIFO store of experience transitions."""

    def __init__(self, capacity=10000):
        # deque with maxlen silently drops the oldest transition once full
        self.buffer = deque(maxlen=capacity)

    def push(self, transition):
        """Append one (state, action, reward, next_state, done) tuple."""
        self.buffer.append(transition)

    def sample(self, batch_size):
        """Draw `batch_size` transitions uniformly, without replacement."""
        return random.sample(self.buffer, batch_size)


class IMEnvironment:
    def __init__(self, G, K):
        """
        RL environment for influence maximization on a graph.

        :param G: networkx graph that influence spreads over
        :param K: number of seed nodes to select per episode
        """
        self.N = G.number_of_nodes()
        self.G = G
        self.K = K
        # Node labels may be 1-based (some edge-list datasets) or 0-based;
        # `offset` maps a node label to its 0-based state-vector index.
        self.offset = 1 if min(G.nodes) > 0 else 0
        self.reset()
        self.experience = ReplayBuffer()

    def reset(self):
        """Reset the episode and return the initial all-zeros state vector."""
        self.now_state = np.zeros(self.N, dtype=np.float32)
        self.now_seed = set()
        self.now_activate_num = 0
        self.done = False
        return self.now_state

    def step(self, action):
        """
        Add node `action` to the seed set, re-estimate the spread via
        Monte-Carlo WIC simulation, and store the transition.

        Duplicate actions are ignored: no state change, nothing stored.

        :param action: node label (not a 0-based index) to add as a seed
        """
        if action in self.now_seed:
            return

        self.action = action

        # Snapshot the old state before mutating it in place.
        state = copy.deepcopy(self.now_state)

        idx = action - self.offset  # 0-based index into the state vector
        self.now_state[idx] = 1.0
        self.now_seed.add(action)

        # Monte-Carlo estimate of expected spread for the enlarged seed set.
        # BUG FIX: use self.G — the original read the module-level global G.
        activate_num = WIC_spread(self.G, seed_set=set(self.now_seed), num_simulations=500)

        # Reward is the marginal gain in expected spread.
        self.reward = activate_num - self.now_activate_num
        self.now_activate_num = activate_num

        self.done = len(self.now_seed) >= self.K
        # BUG FIX: store a copy of the next state — the original pushed the
        # live self.now_state array, so later steps mutated every stored
        # next-state in the replay buffer.
        self.experience.push((state, idx, self.reward, copy.deepcopy(self.now_state), self.done))


class DoubleGNN_DQN(nn.Module):
    """Two-branch GAT encoder followed by a per-node Q-value head."""

    def __init__(self, out_dim, heads=1):
        super(DoubleGNN_DQN, self).__init__()
        # Two independent attention branches over the same input features.
        self.gat_s = GATConv(1, out_dim, heads=heads, concat=False)
        self.gat_t = GATConv(1, out_dim, heads=heads, concat=False)
        self.fc1 = nn.Linear(1 + 2 * out_dim, 512)
        self.fc2 = nn.Linear(512, 1)

    def forward(self, state_batch, x, edge_index):
        """
        :param state_batch: sequence of B numpy arrays, each of shape (N,)
        :param x: node degree features, tensor of shape (N, 1)
        :param edge_index: edge index tensor of shape (2, E)
        :return: Q values, tensor of shape (B, N) — one value per node
        """
        # Node embeddings from both branches, concatenated to (N, 2d).
        emb_s = F.leaky_relu(self.gat_s(x, edge_index))
        emb_t = F.leaky_relu(self.gat_t(x, edge_index))
        node_emb = torch.cat((emb_s, emb_t), dim=1)

        # Prepend each state's selection flag column to the shared
        # embeddings, producing a (B, N, 1 + 2d) batch.
        stacked = torch.stack([
            torch.cat((torch.tensor(s, dtype=torch.float32).unsqueeze(1), node_emb), dim=1)
            for s in state_batch
        ])

        hidden = F.relu(self.fc1(stacked))          # (B, N, 512)
        return self.fc2(hidden).squeeze(-1)         # (B, N)



def select_action(q_values, available_actions, epsilon):
    """
    Epsilon-greedy selection restricted to the still-available actions.

    :param q_values: tensor of shape (N,), one Q value per node
    :param available_actions: list of selectable node indices
    :param epsilon: exploration probability in [0, 1]
    :return: chosen node index (always an element of available_actions)
    """
    # Exploit: pick the available action with the highest Q value.
    if random.random() >= epsilon:
        masked = q_values[available_actions]
        best = int(torch.argmax(masked).item())
        return available_actions[best]
    # Explore: uniform choice among what is left.
    return random.choice(available_actions)


def _greedy_action(dqn, env, deg_feat, data, offset):
    """Return the highest-Q node label that is not yet in the seed set."""
    q = dqn([env.now_state], deg_feat, data.edge_index).squeeze(0).detach()
    # BUG FIX: sort best-first — the original ascending argsort walked the
    # indices from LOWEST Q value, i.e. it greedily picked the worst node.
    for idx in torch.argsort(q, descending=True).tolist():
        node = idx + offset  # map 0-based Q index back to a node label
        if node not in env.now_seed:
            return node


def train_dqn(G, env, deg_feat, K, data, episodes=500, batch_size=64, gamma=0.99, epsilon_start=1.0, epsilon_end=0,
              epsilon_decay=0.98, C=50):
    """
    Train a double GNN-DQN to pick an influence-maximizing seed set.

    :param G: networkx graph
    :param env: IMEnvironment wrapping G (enforces the K-seed episode end)
    :param deg_feat: (N, 1) tensor of node degrees used as GNN input
    :param K: target seed-set size
    :param data: torch_geometric Data built from G (provides edge_index)
    :param episodes: number of training episodes
    :param batch_size: replay-buffer sample size per gradient step
    :param gamma: discount factor
    :param epsilon_start: initial epsilon-greedy exploration rate
    :param epsilon_end: floor for the exploration rate
    :param epsilon_decay: multiplicative per-episode epsilon decay
    :param C: target-network hard-sync period, in episodes
    """
    dqn = DoubleGNN_DQN(32)
    target_dqn = DoubleGNN_DQN(32)
    target_dqn.load_state_dict(dqn.state_dict())
    optimizer = optim.Adam(dqn.parameters(), lr=1e-3)

    # Node labels may be 1-based; `offset` maps 0-based indices to labels.
    # BUG FIX: the original greedy branches always used `idx + 1`, which is
    # wrong for 0-based graphs despite the min(G.nodes) checks elsewhere.
    offset = 1 if min(G.nodes) > 0 else 0
    all_node = {i + offset for i in range(env.N)}

    epsilon = epsilon_start

    for episode in range(episodes):
        env.reset()
        while not env.done:
            if random.random() < epsilon:
                action = random.choice(list(all_node - env.now_seed))
            else:
                action = _greedy_action(dqn, env, deg_feat, data, offset)

            env.step(action)

            # One gradient step once the buffer holds a full batch.
            if len(env.experience.buffer) >= batch_size:
                batch = env.experience.sample(batch_size)
                s_batch, a_batch, r_batch, s1_batch, done_batch = zip(*batch)
                a_batch = torch.tensor(a_batch, dtype=torch.long)
                r_batch = torch.tensor(r_batch, dtype=torch.float32)
                done_mask = torch.tensor(done_batch, dtype=torch.float32)

                q_values = dqn(s_batch, deg_feat, data.edge_index)  # (B, N)
                q_value = q_values.gather(1, a_batch.unsqueeze(1)).squeeze(1)

                with torch.no_grad():
                    next_q_values = target_dqn(s1_batch, deg_feat, data.edge_index)  # (B, N)
                    max_next_q = next_q_values.max(1)[0]

                # BUG FIX: zero the bootstrap term on terminal transitions —
                # the original ignored done_batch entirely.
                target = r_batch + gamma * max_next_q * (1.0 - done_mask)
                loss = F.mse_loss(q_value, target)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

        # Hard-sync the target network every C episodes. BUG FIX: the
        # original check sat inside the step loop, re-syncing on every
        # single step of matching episodes.
        if episode % C == 0:
            target_dqn.load_state_dict(dqn.state_dict())

        epsilon = max(epsilon * epsilon_decay, epsilon_end)
        print(f"[Episode {episode + 1}] Total Reward: {env.now_activate_num:.2f}, Epsilon: {epsilon:.4f}")

    # Greedy evaluation of the learned policy.
    env.reset()
    dqn.eval()
    while not env.done:
        env.step(_greedy_action(dqn, env, deg_feat, data, offset))

    seed22 = set(env.now_seed)
    print(seed22)
    influence = WIC_spread(G, seed_set=seed22, num_simulations=500)
    print(f"种子集合的平均影响力传播范围为：{influence:.2f} 个节点")

    influence = IC_spread(G, seed22, activation_prob=0.05, num_simulations=500)
    print(f"IC模型下平均影响力传播：{influence:.2f} 个节点")


def _get_state_vector(self):
    """
    拼接状态：当前0/1状态 + 节点嵌入，返回 [N × (1+d)] 的特征矩阵
    每一行是某节点的 [selected_flag, embedding]
    """
    state_matrix = np.concatenate(
        [self.state.reshape(-1, 1), self.embedding], axis=1  # [N, 1+d]
    )
    return state_matrix


# ---- Script entry: hyper-parameters and setup ----

# Dataset name understood by Get_graph.
# BUG FIX: renamed from `data` — the original reused `data` for both the
# dataset name string and (later) the torch_geometric Data object.
dataset_name = 'soc-dolphins'
# Seed-set size
K = 19
# NOTE(review): the four values below are never used — train_dqn runs on its
# own defaults. Kept as module-level names for backward compatibility.
epochs = 100  # iteration count (unused)
α = 0.1       # learning rate (unused)
γ = 0.9       # discount factor (unused)
ε = 0.5       # greedy rate (unused)

# Build the graph for the chosen dataset.
G = Get_graph(dataset_name)

# Per-node k-core numbers (currently unused by the training loop).
core_dict = Get_Kcore(G)

# Node degrees as the (N, 1) GNN input feature.
# (The redundant mid-file `import torch` was removed; torch is already
# imported at the top of the file.)
degrees = dict(G.degree())
node_list = list(G.nodes())
deg_feat = torch.tensor([[degrees[n]] for n in node_list], dtype=torch.float32)  # shape: (N, 1)

# Convert to a torch_geometric Data object (provides edge_index).
data = from_networkx(G)

env = IMEnvironment(G, K)

train_dqn(G, env, deg_feat, K, data)
