import torch
import torch.nn as nn
import torch.optim as optim
import random
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import itertools

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # use GPU when available

def build_dag_with_weights(layers):
    """Build a layered DAG where consecutive layers are fully connected.

    NOTE(review): this definition is shadowed by a later duplicate of the
    same name further down the file; only the later definition is actually
    used at runtime. Consider deleting one of the two.

    Args:
        layers: list of layer sizes; node ids are assigned consecutively
            starting from 0, and node 0 is the source.

    Returns:
        Tuple ``(G, weights, node_weights, layer_thresholds)`` where
        ``weights`` maps ``(src, dst)`` edges to random weights,
        ``node_weights`` maps each node to a random weight, and
        ``layer_thresholds[i]`` is the mean weight of edges entering layer i.
    """
    G = nx.DiGraph()
    prev_layer_nodes = [0]
    node_counter = 0
    weights = {}  # (src, dst) -> edge weight
    node_weights = {}  # node -> node weight
    layer_thresholds = []  # per-layer threshold (mean incoming edge weight)

    for layer_size in layers:
        current_layer_nodes = list(range(node_counter, node_counter + layer_size))
        node_counter += layer_size
        layer_weights = []  # edge weights feeding the current layer

        # BUG FIX: node weights were never populated, so any caller indexing
        # node_weights (e.g. DQNAgent.act) would raise KeyError.
        for curr_node in current_layer_nodes:
            node_weights[curr_node] = random.uniform(1, 10)

        for prev_node in prev_layer_nodes:
            for curr_node in current_layer_nodes:
                weight = random.uniform(1, 10)
                G.add_edge(prev_node, curr_node)
                weights[(prev_node, curr_node)] = weight
                layer_weights.append(weight)

        # Threshold for the current layer, e.g. the mean edge weight.
        if layer_weights:
            layer_threshold = np.mean(layer_weights)
            layer_thresholds.append(layer_threshold)

        prev_layer_nodes = current_layer_nodes

    return G, weights, node_weights, layer_thresholds


# DQN model
class DQN(nn.Module):
    """Fully connected Q-network mapping a state vector to action Q-values."""

    def __init__(self, state_size, action_size):
        super().__init__()
        hidden1, hidden2 = 128, 64
        self.fc = nn.Sequential(
            nn.Linear(state_size, hidden1),
            nn.ReLU(),
            nn.Linear(hidden1, hidden2),
            nn.ReLU(),
            nn.Linear(hidden2, action_size),
        )

    def forward(self, x):
        """Return Q-value estimates for the batch ``x``."""
        return self.fc(x)


# DQN training agent
class DQNAgent:
    """Agent that selects combinations of successor nodes in the DAG.

    NOTE(review): ``act`` never consults ``self.model`` or ``self.epsilon``;
    selection is purely rule-based, so the network currently plays no role
    in decisions. Confirm whether that is intended.
    """

    def __init__(self, state_size, action_size, weights, node_weights, gamma=0.99, lr=0.001, epsilon=1.0, epsilon_decay=0.995, epsilon_min=0.01):
        self.weights = weights  # edge weight lookup: (src, dst) -> float
        self.state_size = state_size
        self.action_size = action_size  # maximum number of action combinations
        self.gamma = gamma  # discount factor for future rewards
        self.epsilon = epsilon  # exploration rate, decayed in replay()
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = epsilon_min
        self.node_weights = node_weights  # node weight lookup: node -> float
        self.model = DQN(state_size, action_size).to(device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
        self.criterion = nn.MSELoss()

        self.memory = []  # bounded FIFO replay buffer

    def act(self, state, successors, layer_threshold):
        """Return the first (largest) combination of ``successors`` whose
        edges from ``state`` all exist and whose summed node weights do not
        exceed ``layer_threshold``; falls back to one random successor.
        """
        valid_actions = []
        # BUG FIX: the original started at len(successors) + 1, whose first
        # pass asked combinations() for more elements than exist and always
        # yielded nothing. Start at len(successors) instead.
        for num in range(len(successors), 0, -1):
            for combo in itertools.combinations(successors, num):
                # All edges must exist and the combo's node-weight sum must
                # stay within the layer threshold.
                if all(self.weights.get((state, succ)) is not None for succ in combo) and \
                   sum(self.node_weights[succ] for succ in combo) <= layer_threshold:
                    print(f"sum: {sum(self.node_weights[succ] for succ in combo)}")
                    print(f"layer_threshold: {layer_threshold}")
                    valid_actions.append(combo)
                    return valid_actions
        # If no valid actions, choose a random single-successor combination.
        if not valid_actions:
            return [random.choice(list(itertools.combinations(successors, 1)))]
        return valid_actions

    def remember(self, state, action, reward, next_state, done):
        """Store one transition, keeping at most the 1000 most recent."""
        self.memory.append((state, action, reward, next_state, done))
        if len(self.memory) > 1000:
            self.memory.pop(0)

    def replay(self, batch_size=32):
        """Sample a batch from memory and take one gradient step.

        NOTE(review): the reward recomputation below zips a stored state with
        an action tuple and indexes ``self.weights`` with the pairs; that only
        works if states/actions are stored as node sequences, which the
        training loop never does (it never calls ``remember``/``replay``).
        Verify this logic before enabling training.
        """
        if len(self.memory) < batch_size:
            return

        batch = random.sample(self.memory, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)

        # Recompute rewards as the total weight of the selected edges.
        updated_rewards = []
        for state, action in zip(states, actions):
            total_reward = sum(self.weights[(s, a)] for s, a in zip(state, action))
            updated_rewards.append(total_reward)

        states = torch.FloatTensor(states).to(device)
        actions = torch.LongTensor(actions).to(device)
        rewards = torch.FloatTensor(updated_rewards).to(device)
        next_states = torch.FloatTensor(next_states).to(device)
        dones = torch.FloatTensor(dones).to(device)

        # Q-values predicted for the actions actually taken.
        q_values = self.model(states).gather(1, actions.unsqueeze(-1)).squeeze(-1)

        # Bellman targets from the (same) online network.
        with torch.no_grad():
            next_q_values = self.model(next_states).max(1)[0]
            targets = rewards + self.gamma * next_q_values * (1 - dones)

        # One optimization step.
        self.optimizer.zero_grad()
        loss = self.criterion(q_values, targets)
        loss.backward()
        self.optimizer.step()

        # Decay the exploration rate.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

# Build the DAG and assign random weights
def build_dag_with_weights(layers):
    """Construct a layered DAG with random node and edge weights.

    Consecutive layers are fully connected (self-loops skipped). Each
    layer's threshold is the mean of that layer's node weights.

    Returns:
        Tuple ``(graph, edge_weights, node_weights, layer_thresholds)``.
    """
    graph = nx.DiGraph()
    edge_weights = {}
    node_weights = {}  # node -> weight
    layer_thresholds = []  # one threshold per layer
    previous_nodes = [0]
    next_id = 0
    node_weights[0] = random.uniform(1, 10)
    for layer_size in layers:
        layer_nodes = list(range(next_id, next_id + layer_size))
        next_id += layer_size
        # Draw this layer's node weights (these also drive the threshold).
        current_weights = []
        for node in layer_nodes:
            drawn = random.uniform(1, 10)
            node_weights[node] = drawn
            current_weights.append(drawn)
        # Fully connect the previous layer to this one, avoiding self-loops.
        for src in previous_nodes:
            for dst in layer_nodes:
                if src != dst:
                    drawn = random.uniform(1, 10)
                    graph.add_edge(src, dst)
                    edge_weights[(src, dst)] = drawn

        # Layer threshold, e.g. the mean node weight.
        if current_weights:
            threshold = np.mean(current_weights)
            layer_thresholds.append(threshold)
            print(f"Layer weights{layer_size}: {current_weights}, Calculated threshold: {threshold}")

        previous_nodes = layer_nodes

    return graph, edge_weights, node_weights, layer_thresholds


# Visualize the selected paths
def visualize_path(graph, weights, node_weights, node_positions, layer_thresholds, selected_paths=None):
    """Draw the DAG with edge/node weight labels, layer thresholds, and an
    optional highlight of the edges in ``selected_paths``."""
    plt.figure(figsize=(12, 8))  # figure size
    # Flip the y axis so the first layer appears at the top.
    pos = {n: (node_positions[n][0], -node_positions[n][1]) for n in graph.nodes()}

    # Base graph.
    nx.draw(graph, pos, with_labels=False, node_color='skyblue', node_size=500, edge_color='k', linewidths=1, font_size=15)

    # Edge weight labels.
    edge_labels = {(u, v): f"{weights[(u, v)]:.1f}" for u, v in graph.edges()}
    nx.draw_networkx_edge_labels(graph, pos, edge_labels=edge_labels, font_color='red')

    # Node weight labels.
    node_labels = {n: f"{node_weights[n]:.1f}" for n in graph.nodes()}
    nx.draw_networkx_labels(graph, pos, labels=node_labels, font_color='green')

    # Highlight chosen edges, if any were supplied.
    if selected_paths:
        for path in selected_paths:
            if isinstance(path, tuple):  # each entry must be an edge tuple
                nx.draw_networkx_edges(graph, pos, edgelist=[path], edge_color='red', width=2)
            else:
                print(f"Error: Expected tuple in selected_paths, got {type(path)}")

    # Annotate each layer's threshold in the figure corner.
    for level, threshold in enumerate(layer_thresholds):
        plt.text(0.1, 0.95 - level * 0.05, f"Layer {level+1} Threshold: {threshold:.2f}", transform=plt.gca().transAxes, fontsize=9, color='purple')

    plt.title("DQN Pathfinding in DAG")
    plt.axis('off')  # hide the axes
    plt.show()  # display the figure


# Main program
def main():
    """Build the DAG, then run path-selection episodes with visualization."""
    layers = [1, 1, 4, 1, 4, 1, 3, 1]
    G, weights, node_weights, layer_thresholds = build_dag_with_weights(layers)

    state_size = len(G.nodes)
    # Largest number of successor combinations of any single size, over all nodes.
    action_size = max(len(list(itertools.combinations(G.successors(n), r))) for n in G.nodes() for r in range(1, len(list(G.successors(n))) + 1))
    agent = DQNAgent(state_size, action_size, weights, node_weights)

    # Node layout: evenly spaced horizontally per layer, fixed vertical gap.
    node_positions = {}
    y_spacing = 100  # vertical gap between layers
    x_center = 400  # center x coordinate (currently unused)
    current_y = 50  # y coordinate of the first layer

    node_counter = 0
    for layer_index, layer_size in enumerate(layers):
        x_spacing = 300 / (layer_size + 1)  # horizontal gap within the layer
        for i in range(layer_size):
            node_positions[node_counter] = (x_spacing * (i + 1), current_y)
            node_counter += 1
        current_y += y_spacing  # move down to the next layer

    # Sanity check: every node must have a position.
    if len(node_positions) != len(G.nodes):
        print("Error: Not all nodes have positions assigned.")
        print(f"Assigned positions: {len(node_positions)}, Total nodes: {len(G.nodes)}")

    # Training episodes.
    # NOTE(review): agent.remember/agent.replay are never called, so the DQN
    # is never actually trained -- each episode is purely rule-based.
    for episode in range(200):
        # One-hot state over all nodes, starting at source node 0.
        state = [0] * state_size
        current_nodes = [0]
        state[current_nodes[0]] = 1
        total_reward = 0
        path = []  # edges chosen this episode

        while True:
            next_nodes = []
            for current_node in current_nodes:
                successors = list(G.successors(current_node))
                # BUG FIX: final-layer nodes have no successors, yet the
                # original still indexed layer_thresholds one past the end
                # and crashed with IndexError. Skip such nodes.
                if not successors:
                    continue
                # The threshold belongs to the successors' layer (one deeper).
                current_layer_index = find_layer_index(current_node, layers) + 1
                layer_threshold = layer_thresholds[current_layer_index]
                selected_successors_combinations = agent.act(current_node, successors, layer_threshold)
                # Accumulate reward along every selected edge.
                for succ_combination in selected_successors_combinations:
                    next_nodes.extend(succ_combination)
                    for succ in succ_combination:
                        if (current_node, succ) not in weights:
                            print(f"Error: No weight defined for edge ({current_node}, {succ})")
                        else:
                            reward = weights[(current_node, succ)]
                            total_reward += reward
                            path.append((current_node, succ))

            if set(next_nodes) == set(current_nodes):  # no progress -> stop
                break

            current_nodes = list(set(next_nodes))  # advance the frontier

            # Re-encode the one-hot state for the new frontier.
            next_state = [0] * state_size
            for node in current_nodes:
                next_state[node] = 1
            state = next_state
            for edge in path:
                if not isinstance(edge, tuple):
                    print(f"Error: Path contains a non-tuple element {edge}")
            # NOTE(review): drawing on every step of every episode is very
            # slow (plt.show blocks); consider visualizing only occasionally.
            visualize_path(G, weights, node_weights, node_positions, layer_thresholds, path)

        # BUG FIX: this summary sat outside the episode loop and printed only
        # once after all 200 episodes; report once per episode instead.
        print(f"Episode {episode + 1}: Total reward = {total_reward:.2f}")

def find_layer_index(node, layers):
    """Return the index of the layer containing ``node``, or -1 if none.

    Node ids are assumed to be assigned consecutively layer by layer,
    matching how build_dag_with_weights numbers nodes.
    """
    start = 0
    for index, size in enumerate(layers):
        end = start + size
        if start <= node < end:
            return index
        start = end
    return -1  # node falls outside every layer

# Script entry point: build the DAG and run the episode loop.
if __name__ == "__main__":
    main()
