import functools
import os
import pickle
import sys
import time
from collections import defaultdict
from datetime import datetime

import matplotlib.pyplot as plt
import networkx as nx

from task1 import hopcroft_karp, convert_to_bipartite, visualize_directed_graph


def timer(func):
    """Decorator that prints the wall-clock runtime of each call to *func*.

    Args:
        func: the callable to wrap.

    Returns:
        The wrapped callable; it forwards all arguments and the return value.
    """

    @functools.wraps(func)  # preserve func's __name__/__doc__ on the wrapper
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print(f"Program: Function {func.__name__} took {end_time - start_time} seconds to run")
        return result

    return wrapper


# @timer
def hopcroft_karp_all_matchings(bipartite_graph, left_nodes):
    """Generator extension of Hopcroft-Karp that streams all maximum matchings.

    Protocol: the FIRST value yielded is the maximum matching size (an int).
    Every subsequent value is one maximum matching, as a dict that contains
    BOTH directions of each matched pair (u -> v and v -> u); unmatched nodes
    are omitted from the dict.

    Args:
        bipartite_graph: bipartite graph exposing ``nodes()`` / ``neighbors()``.
        left_nodes: ordered sequence of left-partition nodes.
    """
    # Step 1: standard Hopcroft-Karp gives the maximum matching size.
    # A copy is passed because hopcroft_karp may mutate the list.
    _, max_matching_size = hopcroft_karp(bipartite_graph, left_nodes.copy())
    yield max_matching_size

    # NOTE(review): when the left side is perfectly matched, enumeration is
    # skipped entirely (only the size is yielded) — presumably to avoid a
    # combinatorial explosion on dense graphs; confirm this is intended.
    if max_matching_size == len(left_nodes):
        return

    # current_matching maps every node to its partner (None if unmatched);
    # both directions are stored so right-side occupancy is an O(1) check.
    current_matching = {node: None for node in bipartite_graph.nodes()}

    def backtrack(idx, count):
        """Enumerate matchings by deciding each left node's partner (or none)."""
        # Prune: even matching every remaining left node cannot reach the max.
        if count + (len(left_nodes) - idx) < max_matching_size:
            return

        if idx == len(left_nodes):
            if count == max_matching_size:
                # Found a maximum matching: yield a snapshot without the
                # unmatched (None) entries. Previously these snapshots were
                # also accumulated in a list that was never used — removed
                # so the generator streams in O(1) memory per solution.
                yield {k: v for k, v in current_matching.items() if v is not None}
            return

        u = left_nodes[idx]

        # Option 1: match the current left node to each free neighbor in turn.
        for v in bipartite_graph.neighbors(u):
            if current_matching[v] is None:
                # u is always free here (left nodes are processed in order and
                # restored on backtrack), and v was just checked to be free.
                current_matching[u] = v
                current_matching[v] = u

                yield from backtrack(idx + 1, count + 1)

                # Backtrack: both endpoints were free before this choice.
                current_matching[u] = None
                current_matching[v] = None

        # Option 2: leave the current left node unmatched.
        yield from backtrack(idx + 1, count)

    # Stream every maximum matching.
    yield from backtrack(0, 0)


def convert_bimatching_to_di(bipartite_graph_matched):
    """Recover directed-graph matched edges from a bipartite matching dict.

    Keeps only entries of the form ``"<u>_out" -> "<v>_in"`` (skipping None
    partners and the reverse ``_in -> _out`` entries) and returns them as an
    int-to-int mapping ``{u: v}`` of matched directed edges.
    """
    return {
        int(src.split("_")[0]): int(dst.split("_")[0])
        for src, dst in bipartite_graph_matched.items()
        if src and dst and src.endswith("_out") and dst.endswith("_in")
    }


def example2_graph():
    """Build the small 6-node directed example graph used in the demos."""
    graph = nx.DiGraph()
    graph.add_nodes_from(range(1, 7))
    graph.add_edges_from([(1, 2), (2, 6), (1, 6), (1, 3), (1, 4), (1, 5)])
    return graph


def plot_out_degree_distribution(graph, node_set, draw_signal=False, title="节点出度分布", figsize=(10, 6)):
    """Compute (and optionally plot) the out-degree distribution of a node set.

    Args:
        graph (nx.Graph | nx.DiGraph): graph object; out-degree is used for
            directed graphs, plain degree otherwise.
        node_set (set): nodes to analyse.
        draw_signal (bool): when True, also draw a bar chart of the distribution.
        title (str): chart title (only used when draw_signal is True).
        figsize (tuple): figure size (only used when draw_signal is True).

    Returns:
        tuple: ``(degree_dist, degrees, frequencies)`` — a dict mapping
        degree value -> node count, the sorted list of degree values, and
        the matching list of counts.
    """
    # 1. Count the (out-)degree of every node in the set.
    degree_dist = {}
    for node in node_set:
        if isinstance(graph, nx.DiGraph):
            degree = graph.out_degree(node)  # directed graph: out-degree
        else:
            degree = graph.degree(node)  # undirected graph: plain degree
        degree_dist[degree] = degree_dist.get(degree, 0) + 1

    # 2. Prepare plot-ready vectors (sorted degree values, aligned counts).
    degrees = sorted(degree_dist.keys())
    frequencies = [degree_dist[d] for d in degrees]

    if draw_signal:
        # 3. Draw a bar chart of the distribution.
        plt.figure(figsize=figsize)
        bars = plt.bar(degrees, frequencies, color='skyblue', alpha=0.8)

        # Annotate each bar with its count.
        for bar in bars:
            height = bar.get_height()
            plt.text(bar.get_x() + bar.get_width() / 2., height,
                     f'{int(height)}',
                     ha='center', va='bottom', fontsize=9)

        plt.title(title, fontsize=14)
        plt.xlabel('出度', fontsize=12)
        plt.ylabel('节点数量', fontsize=12)

        plt.grid(axis='y', linestyle='--', alpha=0.7)
        plt.xticks(degrees)

        # Summary statistics shown on the figure.
        total_nodes = len(node_set)
        avg_degree = sum(d * f for d, f in degree_dist.items()) / total_nodes if total_nodes > 0 else 0
        max_degree = max(degrees) if degrees else 0

        plt.figtext(0.15, 0.9, f"节点总数: {total_nodes}", fontsize=10)
        plt.figtext(0.15, 0.85, f"平均出度: {avg_degree:.2f}", fontsize=10)
        plt.figtext(0.15, 0.8, f"最大出度: {max_degree}", fontsize=10)

        # Degree range note (only meaningful with more than one degree value).
        if len(degrees) > 1:
            plt.figtext(0.15, 0.75, f"分布范围: {min(degrees)} - {max(degrees)}", fontsize=10)

        plt.tight_layout()
        plt.show()

    # 4. Return the distribution plus the plot-ready vectors.
    return degree_dist, degrees, frequencies


def save_workspace(file_path, **variables):
    """Pickle the given variables (plus metadata) to *file_path*.

    Args:
        file_path (str): destination path.
        **variables: name -> value pairs to save.

    Returns:
        bool: True on success.

    Raises:
        Exception: any failure is re-raised after printing a message.
    """
    try:
        # Bundle the variables with a timestamp and environment metadata.
        save_data = {
            'variables': variables,
            'metadata': {
                'save_time': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                'python_version': sys.version,  # was os.sys.version — use sys directly
                'system_info': os.uname() if hasattr(os, 'uname') else None  # uname is POSIX-only
            }
        }

        with open(file_path, 'wb') as f:
            pickle.dump(save_data, f, protocol=pickle.HIGHEST_PROTOCOL)

        print(f"✅ 成功保存 {len(variables)} 个变量到 {file_path}")
        return True
    except Exception as e:
        print(f"❌ 保存失败: {str(e)}")
        raise  # bare raise preserves the original traceback


def load_workspace(file_path):
    """Load a workspace previously written by ``save_workspace``.

    SECURITY NOTE: ``pickle.load`` can execute arbitrary code from the file —
    only load files this program created itself.

    Args:
        file_path (str): path of the pickle file.

    Returns:
        dict: the saved variables, or {} if the file does not exist.

    Raises:
        Exception: any failure other than a missing file is re-raised.
    """
    try:
        with open(file_path, 'rb') as f:
            save_data = pickle.load(f)

        print(f"✅ 成功加载 {len(save_data['variables'])} 个变量")
        print(f"保存时间: {save_data['metadata']['save_time']}")

        # Inject the variables into this module's global namespace.
        # NOTE(review): globals() here is THIS module's namespace, not the
        # caller's — this only behaves as advertised when called from this
        # file itself; callers should rely on the returned dict instead.
        globals().update(save_data['variables'])

        return save_data['variables']
    except FileNotFoundError:
        # Missing file is an expected, recoverable case: report and return {}.
        print(f"❌ 文件不存在: {file_path}")
        return {}
    except Exception as e:
        print(f"❌ 加载失败: {str(e)}")
        raise  # bare raise preserves the original traceback


if __name__ == '__main__':
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly

    # Commented-out demo: enumerate and visualize every maximum matching of
    # the small 6-node example graph.
    # exa2 = example2_graph()
    # exa2_bi, exa2_left, exa2_right = convert_to_bipartite(exa2)
    # exa2_bi_all_matchings, exa2_bi_matching_size = hopcroft_karp_all_matchings(exa2_bi, exa2_left)
    #
    # exa2_all_matching = [convert_bimatching_to_di(matching) for matching in exa2_bi_all_matchings]
    #
    # # print results
    # # print(f"最大匹配大小: {exa2_bi_all_matchings}")
    # # print(f"找到 {len(exa2_bi_all_matchings)} 种不同的最大匹配模式:")
    # for i, matching in enumerate(exa2_bi_all_matchings):
    #     print(f"\n匹配模式 {i}:")
    #     for left, right in matching.items():
    #         if left in exa2_left:  # only print left-to-right mappings
    #             print(f"  {left} → {right}")
    #
    # for i, matching in enumerate(exa2_all_matching):
    #     visualize_directed_graph(exa2, matching, f"{i}-th matching")

    # Build an Erdős–Rényi random digraph and its bipartite conversion.
    er_nodes_size, er_probability = 20, 0.1
    er_digraph = nx.erdos_renyi_graph(er_nodes_size, er_probability, directed=True, seed=42)
    print(f"ER有向图: {len(er_digraph.nodes())}节点, {len(er_digraph.edges())}边")
    er_bi, er_left, er_right = convert_to_bipartite(er_digraph)

    # Generator protocol: the FIRST value yielded by hopcroft_karp_all_matchings
    # is the maximum matching size; subsequent values are individual matchings.
    hk_all_matchings = hopcroft_karp_all_matchings(er_bi, er_left)
    er_bi_matching_size = next(hk_all_matchings)
    # Driver-node count = total nodes minus maximum matching size.
    print(f"驱动节点个数: {len(er_digraph.nodes()) - er_bi_matching_size}")

    # Collect every maximum matching and its corresponding driver-node set.
    er_all_matchings = []
    er_drivenodes = []
    er_all_nodes = set(er_digraph.nodes())
    for i, matching in enumerate(hk_all_matchings):
        er_di_matching = convert_bimatching_to_di(matching)
        er_all_matchings.append(er_di_matching)
        # NOTE(review): this removes the SOURCES (keys) of matched edges;
        # driver nodes are usually the nodes NOT matched as edge TARGETS
        # (values) — confirm keys vs values here is intended.
        er_drivenodes.append(er_all_nodes - set(er_di_matching.keys()))
        if i % 50 == 0:  # progress report every 50 solutions
            print(f"第{i+1:03d}个解，总共{len(er_all_matchings)}个解")
    print(f"\n共找到{len(er_all_matchings)}个解")

    # Older eager-list version of the enumeration, kept for reference.
    # er_bi_all_matchings, er_bi_matching_size = hopcroft_karp_all_matchings(er_bi, er_left)


    # if er_bi_all_matchings is not None:
    #     er_all_matchings = [convert_bimatching_to_di(matching) for matching in er_bi_all_matchings]
    #
    #     er_all_nodes = set(er_digraph.nodes())
    #     er_drivenodes = [er_all_nodes - set(er_matching.keys()) for er_matching in er_all_matchings]

        # print results
        # print(f"找到 {len(er_bi_all_matchings)} 种不同的最大匹配模式:")
        # for i, matching in enumerate(er_bi_all_matchings):
        #     print(f"\n匹配模式 {i}:")
        #     for left, right in matching.items():
        #         if left in er_left:  # only print left-to-right mappings
        #             print(f"  {left} → {right}")

        # for i, matching in enumerate(er_all_matchings):
        #     visualize_directed_graph(er_digraph, matching, f"{i}-th matching")

        # for i, nodes in enumerate(er_drivenodes):
        #     print(f"[{i+1}] {nodes}")

    # Out-degree distribution of each matching's driver-node set (no plotting).
    print(f"不同匹配模式的度分布")
    for i, drivenode in enumerate(er_drivenodes):
        degree_dist, degrees, frequencies = plot_out_degree_distribution(er_digraph, drivenode)
        print(f"{i+1:03d}: {degree_dist}")

    # Persist the results for later analysis.
    save_workspace("result_task2.pkl", er_digraph=er_digraph, er_all_matchings=er_all_matchings, er_bi_matching_size=er_bi_matching_size, er_drivenodes=er_drivenodes)
