{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "from xmark import XMark_benchmark\n",
    "import networkx as nx\n",
    "import numpy as np\n",
    "import random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def homophily(graph, label):\n",
    "    \"\"\"Return (observed, expected) homophily of node attribute `label`.\n",
    "\n",
    "    Observed: fraction of edges whose endpoints share the same value.\n",
    "    Expected: Monte Carlo baseline -- each node is compared with one node\n",
    "    drawn uniformly at random (non-deterministic unless `random` is seeded).\n",
    "    \"\"\"\n",
    "    edges = list(graph.edges())\n",
    "    similar = [(u, v) for u, v in edges if graph.nodes[u][label] == graph.nodes[v][label]]\n",
    "    obs = len(similar) / len(edges)\n",
    "\n",
    "    nodes = list(graph.nodes())\n",
    "    similar_random = sum(\n",
    "        1 for u in nodes\n",
    "        if graph.nodes[u][label] == graph.nodes[random.choice(nodes)][label]\n",
    "    )\n",
    "    exp = similar_random / len(nodes)\n",
    "\n",
    "    return obs, exp\n",
    "\n",
    "def sse(coms, graph, label):\n",
    "    \"\"\"Total within-community sum of squared errors of attribute `label`.\n",
    "\n",
    "    For each community in `coms` (iterables of node ids) the centroid is the\n",
    "    mean of the members' attribute values; the community SSE is the sum of\n",
    "    squared deviations from that centroid. Returns the sum over communities.\n",
    "    \"\"\"\n",
    "    total = 0.0\n",
    "    for com in coms:\n",
    "        values = np.asarray([graph.nodes[n][label] for n in com])\n",
    "        centroid = values.mean()\n",
    "        # (x - centroid)**2 is already non-negative: the original abs() was\n",
    "        # redundant, and `tot_centr` was collected but never used.\n",
    "        total += float(((values - centroid) ** 2).sum())\n",
    "    return total"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Categorical attributes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "label_0 m=2\n",
      "observed homophily:  0.6269156244495332\n",
      "expected homophily:  0.479\n",
      "categorical newman's coefficient:  0.2502573989696535\n",
      "\n",
      "label_1 m=5\n",
      "observed homophily:  0.43297516293817156\n",
      "expected homophily:  0.238\n",
      "categorical newman's coefficient:  0.25948473986330645\n",
      "\n",
      "label_2 m=auto\n",
      "observed homophily:  0.307556808173331\n",
      "expected homophily:  0.0685\n",
      "categorical newman's coefficient:  0.25554249697182035\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Build an X-Mark benchmark graph carrying three categorical node\n",
    "# attributes, then measure how strongly each attribute aligns with the\n",
    "# network structure (homophily and Newman's assortativity).\n",
    "N = 2000\n",
    "gamma = 3\n",
    "beta = 2\n",
    "#auto: number of labels equal to number of communities\n",
    "m_cat = [2, 5, 'auto']\n",
    "theta = 0.3  # attribute noise passed to the generator\n",
    "mu = 0.3  # mixing parameter (topology difficulty)\n",
    "avg_k = 10  # target average degree\n",
    "min_com = 20  # minimum community size\n",
    "\n",
    "g = XMark_benchmark(N, gamma, beta, mu,\n",
    "          labels=m_cat,\n",
    "          noise=theta,\n",
    "          average_degree=avg_k, min_community=min_com,\n",
    "          type_attr=\"categorical\")\n",
    "\n",
    "# For each generated attribute label_i, compare observed homophily with the\n",
    "# random-pairing baseline and with the assortativity coefficient.\n",
    "for i, m in enumerate(m_cat):\n",
    "    print('label_' + str(i), \"m=\" + str(m))\n",
    "    obs, exp = homophily(g, 'label_' + str(i))\n",
    "    print(\"observed homophily: \", obs)\n",
    "    print(\"expected homophily: \", exp)\n",
    "    r = nx.attribute_assortativity_coefficient(g, 'label_' + str(i))\n",
    "    print(\"categorical newman's coefficient: \", r)\n",
    "    print(\"\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{1, 2}\n",
      "{1, 2, 3, 4, 5}\n",
      "{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22}\n"
     ]
    }
   ],
   "source": [
    "# Show the value domain of each categorical attribute; per the 'auto'\n",
    "# setting above, label_2 gets one value per community.\n",
    "for i, m in enumerate(m_cat):\n",
    "    dict_val = nx.get_node_attributes(g, 'label_' + str(i))\n",
    "    # set() accepts any iterable; the intermediate list() was redundant.\n",
    "    dom = set(dict_val.values())\n",
    "    print(dom)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Continuous attributes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SSE 1:  511.8815947368363\n",
      "SSE 2:  484.3988124362103\n"
     ]
    }
   ],
   "source": [
    "# Generate an X-Mark graph with two continuous attributes and evaluate how\n",
    "# tightly the ground-truth communities cluster each attribute (via SSE).\n",
    "N = 2000\n",
    "gamma = 3\n",
    "beta = 2\n",
    "#studied in the paper, number of labels equal to number of communities\n",
    "#m_cont = [\"auto\", \"auto\"]\n",
    "m_cont = [2, 5] # number of peaks (modes) per continuous attribute: two attributes are generated, the first with 2 peaks, the second with 5\n",
    "# e.g. label_1 values are drawn from a mixture of 5 normal distributions with distinct means\n",
    "sigma = 0.5 # std dev: within-community spread (noise) of the attribute; lower = cleaner attributes\n",
    "mu = 0.3 # mixing parameter (controls topological difficulty)\n",
    "avg_k = 10 # average degree\n",
    "min_com = 20 # minimum community size\n",
    "\n",
    "g = XMark_benchmark(N, gamma, beta, mu,\n",
    "          labels=m_cont,\n",
    "          std=sigma,\n",
    "          average_degree=avg_k, min_community=min_com,\n",
    "          type_attr=\"continuous\")\n",
    "\n",
    "# Each node's 'community' attribute holds the full member set (see the\n",
    "# inspection cells below); deduplicate via frozensets to recover the list\n",
    "# of ground-truth communities.\n",
    "coms = {frozenset(g.nodes[v]['community']) for v in g}\n",
    "coms = [list(c) for c in coms]\n",
    "\n",
    "val_sse1 = sse(coms, g, 'label_0')\n",
    "print(\"SSE 1: \", val_sse1)\n",
    "val_sse2 = sse(coms, g, 'label_1')\n",
    "print(\"SSE 2: \", val_sse2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- 开始导出网络数据 ---\n",
      "✅ 拓扑结构（边列表）已导出到: xmark_continuous_edge_list.csv\n",
      "✅ 节点属性和真实划分已导出到: xmark_continuous_node_attributes_and_truth.csv\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import networkx as nx\n",
    "\n",
    "# Assumes the generation cell above ran successfully and the graph is in `g`.\n",
    "\n",
    "print(\"--- 开始导出网络数据 ---\")\n",
    "\n",
    "# 1. Export the topology (edge list)\n",
    "# ----------------------------------\n",
    "edge_list_path = \"xmark_continuous_edge_list.csv\"\n",
    "# data=False exports only node IDs, no edge attributes (X-Mark graphs are unweighted)\n",
    "nx.write_edgelist(g, edge_list_path, delimiter=\",\", data=False)\n",
    "\n",
    "print(f\" 拓扑结构（边列表）已导出到: {edge_list_path}\")\n",
    "\n",
    "\n",
    "# 2. Export node attributes and the ground-truth partition\n",
    "# ----------------------------------\n",
    "node_data = []\n",
    "# Iterate over all nodes of g together with their attribute dicts\n",
    "for node_id, data in g.nodes(data=True):\n",
    "    # Collect node ID, ground-truth community and the generated continuous attributes\n",
    "    row = {\n",
    "        'NodeID': node_id,\n",
    "        # NOTE(review): 'community' holds the whole member set, not a scalar\n",
    "        # ID -- this export is corrected in the *_FIXED cell further below\n",
    "        'CommunityID': data['community'], \n",
    "        # 'label_0' holds the first continuous attribute (m_cont peak count 2)\n",
    "        'Label_0': data['label_0'],\n",
    "        # 'label_1' holds the second continuous attribute (m_cont peak count 5)\n",
    "        'Label_1': data['label_1'], \n",
    "    }\n",
    "    node_data.append(row)\n",
    "\n",
    "# Convert to a DataFrame and export as CSV\n",
    "df_nodes = pd.DataFrame(node_data)\n",
    "attributes_path = \"xmark_continuous_node_attributes_and_truth.csv\"\n",
    "df_nodes.to_csv(attributes_path, index=False)\n",
    "\n",
    "print(f\" 节点属性和真实划分已导出到: {attributes_path}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "NodeID: 0, Community Value Type: <class 'set'>, Value: {0, 1036, 146, 150, 1818, 1050, 810, 47, 943, 560, 1200, 57, 190, 1472, 1861, 1096, 456, 1352, 95, 121, 613, 1639, 118, 502, 505}\n",
      "NodeID: 1, Community Value Type: <class 'set'>, Value: {1536, 1, 1926, 1691, 292, 171, 175, 1202, 1589, 54, 439, 950, 1855, 576, 968, 1865, 587, 1239, 856, 480, 1120, 1634, 101, 108, 1005}\n",
      "NodeID: 2, Community Value Type: <class 'set'>, Value: {2, 1550, 1058, 34, 36, 40, 41, 1579, 559, 561, 1587, 1076, 51, 1079, 59, 1086, 1090, 591, 1617, 1107, 596, 1108, 1619, 1111, 1620, 601, 1626, 93, 1630, 94, 1128, 106, 618, 620, 1130, 1137, 1139, 117, 1146, 1658, 124, 127, 1153, 1154, 644, 1669, 1159, 136, 1162, 652, 654, 149, 151, 1176, 156, 669, 165, 1709, 179, 697, 701, 711, 1734, 716, 719, 1749, 749, 750, 1786, 1792, 1295, 788, 1815, 1816, 282, 800, 1827, 808, 1834, 318, 1350, 1356, 1358, 342, 867, 1902, 369, 1922, 928, 428, 1003}\n",
      "NodeID: 3, Community Value Type: <class 'set'>, Value: {1664, 1537, 3, 1799, 1547, 1293, 1168, 272, 1557, 24, 300, 1198, 562, 1083, 1627, 347, 736, 99, 872, 1384, 1385, 489, 1131, 1520, 1779, 116, 243, 1781, 250, 1276}\n",
      "NodeID: 4, Community Value Type: <class 'set'>, Value: {513, 4, 1032, 1545, 523, 528, 532, 1044, 1558, 1560, 1049, 1532, 1053, 545, 546, 1569, 551, 1065, 1577, 1069, 1581, 1586, 567, 1603, 593, 86, 1113, 98, 1124, 1642, 107, 1643, 111, 1140, 1142, 119, 122, 1660, 130, 133, 650, 1167, 142, 1177, 1701, 1703, 1192, 167, 1214, 1218, 205, 717, 208, 1747, 1750, 1775, 1271, 1801, 1884, 350, 1376, 353, 866, 1890, 870, 1386, 877, 1389, 366, 368, 882, 1909, 376, 378, 1403, 382, 384, 385, 1412, 1413, 1414, 905, 1930, 1932, 910, 402, 1942, 407, 920, 1431, 1441, 1443, 931, 1445, 1956, 1449, 1961, 427, 1452, 1454, 942, 1969, 1458, 948, 1464, 1977, 1979, 1980, 1468, 447, 1984, 450, 963, 1987, 451, 1993, 1482, 1483, 1488, 979, 1494, 473, 988, 477, 1504, 996, 484, 486, 487, 492, 495, 1010, 1015, 1531, 508, 1533, 511}\n"
     ]
    }
   ],
   "source": [
    "# Inspect the raw 'community' value of the first 5 nodes to determine\n",
    "# whether it is a scalar ID or a collection.\n",
    "for i, (node_id, data) in enumerate(g.nodes(data=True)):\n",
    "    if i < 5:\n",
    "        # Print the raw value to see if it is a single number or a list/set\n",
    "        print(f\"NodeID: {node_id}, Community Value Type: {type(data['community'])}, Value: {data['community']}\")\n",
    "    else:\n",
    "        break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "NodeID: 0, All Attributes: {'label_0': 10.648366964335429, 'label_1': 29.0682943442907, 'community': {0, 1036, 146, 150, 1818, 1050, 810, 47, 943, 560, 1200, 57, 190, 1472, 1861, 1096, 456, 1352, 95, 121, 613, 1639, 118, 502, 505}}\n",
      "NodeID: 1, All Attributes: {'label_0': 1.9667720268725155, 'label_1': 11.395291076597774, 'community': {1536, 1, 1926, 1691, 292, 171, 175, 1202, 1589, 54, 439, 950, 1855, 576, 968, 1865, 587, 1239, 856, 480, 1120, 1634, 101, 108, 1005}}\n",
      "NodeID: 2, All Attributes: {'label_0': 0.9590790833524321, 'label_1': 11.305724888937258, 'community': {2, 1550, 1058, 34, 36, 40, 41, 1579, 559, 561, 1587, 1076, 51, 1079, 59, 1086, 1090, 591, 1617, 1107, 596, 1108, 1619, 1111, 1620, 601, 1626, 93, 1630, 94, 1128, 106, 618, 620, 1130, 1137, 1139, 117, 1146, 1658, 124, 127, 1153, 1154, 644, 1669, 1159, 136, 1162, 652, 654, 149, 151, 1176, 156, 669, 165, 1709, 179, 697, 701, 711, 1734, 716, 719, 1749, 749, 750, 1786, 1792, 1295, 788, 1815, 1816, 282, 800, 1827, 808, 1834, 318, 1350, 1356, 1358, 342, 867, 1902, 369, 1922, 928, 428, 1003}}\n",
      "NodeID: 3, All Attributes: {'label_0': 10.165280294892142, 'label_1': 0.2938422943544113, 'community': {1664, 1537, 3, 1799, 1547, 1293, 1168, 272, 1557, 24, 300, 1198, 562, 1083, 1627, 347, 736, 99, 872, 1384, 1385, 489, 1131, 1520, 1779, 116, 243, 1781, 250, 1276}}\n",
      "NodeID: 4, All Attributes: {'label_0': 10.56686484286657, 'label_1': 38.40738056312782, 'community': {513, 4, 1032, 1545, 523, 528, 532, 1044, 1558, 1560, 1049, 1532, 1053, 545, 546, 1569, 551, 1065, 1577, 1069, 1581, 1586, 567, 1603, 593, 86, 1113, 98, 1124, 1642, 107, 1643, 111, 1140, 1142, 119, 122, 1660, 130, 133, 650, 1167, 142, 1177, 1701, 1703, 1192, 167, 1214, 1218, 205, 717, 208, 1747, 1750, 1775, 1271, 1801, 1884, 350, 1376, 353, 866, 1890, 870, 1386, 877, 1389, 366, 368, 882, 1909, 376, 378, 1403, 382, 384, 385, 1412, 1413, 1414, 905, 1930, 1932, 910, 402, 1942, 407, 920, 1431, 1441, 1443, 931, 1445, 1956, 1449, 1961, 427, 1452, 1454, 942, 1969, 1458, 948, 1464, 1977, 1979, 1980, 1468, 447, 1984, 450, 963, 1987, 451, 1993, 1482, 1483, 1488, 979, 1494, 473, 988, 477, 1504, 996, 484, 486, 487, 492, 495, 1010, 1015, 1531, 508, 1533, 511}}\n"
     ]
    }
   ],
   "source": [
    "# Dump all attributes of the first 5 nodes, looking for a scalar\n",
    "# community-label ID among them.\n",
    "for i, (node_id, data) in enumerate(g.nodes(data=True)):\n",
    "    if i < 5:\n",
    "        print(f\"NodeID: {node_id}, All Attributes: {data}\")\n",
    "    else:\n",
    "        break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "成功识别 32 个社区。\n"
     ]
    }
   ],
   "source": [
    "# Goal: map every node ID to the index (0, 1, 2, ...) of the community it\n",
    "# belongs to, using the deduplicated `coms` list built in the SSE cell.\n",
    "node_to_community_id = {}\n",
    "for community_index, node_list in enumerate(coms):\n",
    "    # node_list holds all members of one community (e.g. {0, 1036, 146, ...})\n",
    "    for node_id in node_list:\n",
    "        node_to_community_id[node_id] = community_index \n",
    "\n",
    "print(f\"成功识别 {len(coms)} 个社区。\")\n",
    "\n",
    "# node_to_community_id now maps each node to its ground-truth community ID (0-based)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "✅ 修正后的节点属性和真实划分已导出到: xmark_continuous_node_attributes_and_truth_FIXED.csv\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import networkx as nx\n",
    "\n",
    "# ... assumes the cells above ran, so `g` and `node_to_community_id` exist ...\n",
    "\n",
    "node_data = []\n",
    "for node_id, data in g.nodes(data=True):\n",
    "    row = {\n",
    "        'NodeID': node_id,\n",
    "        # The fix: look up the single integer community ID from the mapping\n",
    "        # instead of exporting the raw member set\n",
    "        'CommunityID': node_to_community_id[node_id], \n",
    "        'Label_0': data['label_0'],\n",
    "        'Label_1': data['label_1'],\n",
    "    }\n",
    "    node_data.append(row)\n",
    "\n",
    "# Convert to a DataFrame and export as CSV\n",
    "df_nodes = pd.DataFrame(node_data)\n",
    "attributes_path = \"xmark_continuous_node_attributes_and_truth_FIXED.csv\"\n",
    "df_nodes.to_csv(attributes_path, index=False)\n",
    "\n",
    "print(f\" 修正后的节点属性和真实划分已导出到: {attributes_path}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "atrribute-graph",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
