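"""Sample a text-attributed graph stored as JSON while preserving its key
properties: mask/label distributions, density, community structure, and
connectivity.

Usage: python sample_graph.py input_file output_file percentage
"""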
import json
import random
import sys
from collections import Counter, defaultdict

import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh

try:
    import community as community_louvain
except ImportError:
    print("Warning: python-louvain package not found. Installing...")
    import subprocess
    subprocess.check_call([sys.executable, "-m", "pip", "install", "python-louvain"])
    import community as community_louvain

def load_graph_from_json(json_file):
    """Load node records from JSON: a single array, a single object, or JSON Lines."""
    nodes = []
    
    try:
        # First try to parse as a single JSON array or object
        with open(json_file, 'r', encoding='utf-8') as f:
            content = f.read().strip()
            try:
                data = json.loads(content)
                if isinstance(data, list):
                    nodes = data
                else:
                    nodes = [data]
            except json.JSONDecodeError:
                # Fall back to JSON Lines: parse the already-read content one line at a time
                nodes = []
                for line in content.splitlines():
                    line = line.strip()
                    if line:  # Skip empty lines
                        try:
                            nodes.append(json.loads(line))
                        except json.JSONDecodeError:
                            continue
    except Exception as e:
        print(f"Error loading graph: {e}")
        return []
    
    return nodes
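
# Example input record, a hedged sketch: the field names are the ones consumed
# by build_networkx_graph below (node_id, label, text, neighbors, mask); the
# values shown here are hypothetical.
#
#   {"node_id": 0, "label": 3, "text": "Paper abstract ...",
#    "neighbors": [1, 5], "mask": "train"}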

def build_networkx_graph(nodes):
    """Build a NetworkX graph from the loaded node data."""
    G = nx.Graph()
    
    # Add nodes with attributes
    for node in nodes:
        G.add_node(
            node['node_id'],
            label=node['label'],
            text=node['text'],
            mask=node['mask']
        )
    
    # Add edges
    for node in nodes:
        node_id = node['node_id']
        for neighbor_id in node['neighbors']:
            if G.has_node(neighbor_id):  # Only add edge if both nodes exist
                G.add_edge(node_id, neighbor_id)
    
    return G
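
# Note: nx.Graph is undirected and deduplicates edges, so a neighbor pair that
# appears on both endpoints' lists produces a single edge.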

def analyze_graph_properties(G):
    """Analyze the properties of the graph as specified in the requirements."""
    properties = {}
    
    # Mask distribution (Train/Validation/Test)
    masks = [G.nodes[n]['mask'] for n in G.nodes]
    mask_distribution = Counter(masks)
    properties['mask_distribution'] = {k: v/len(G.nodes) for k, v in mask_distribution.items()}
    
    # Label distribution
    labels = [G.nodes[n]['label'] for n in G.nodes]
    label_distribution = Counter(labels)
    properties['label_distribution'] = {k: v/len(G.nodes) for k, v in label_distribution.items()}
    
    # Graph density
    properties['density'] = nx.density(G)
    
    # Degree distribution
    degrees = [d for n, d in G.degree()]
    degree_counts = Counter(degrees)
    properties['degree_distribution'] = {k: v/len(G.nodes) for k, v in degree_counts.items()}
    
    # Community structure (Louvain algorithm)
    try:
        communities = community_louvain.best_partition(G)
        community_counts = Counter(communities.values())
        properties['community_distribution'] = {k: v/len(G.nodes) for k, v in community_counts.items()}
    except Exception:
        properties['community_distribution'] = {}
    
    # Spectral characteristics: smallest eigenvalues of the normalized Laplacian
    properties['spectral_eigenvalues'] = []
    if len(G) > 1:
        try:
            laplacian = nx.normalized_laplacian_matrix(G)
            k = min(5, laplacian.shape[0] - 1)
            if sp.issparse(laplacian) and k > 0:
                eigenvalues = eigsh(laplacian, k=k, which='SM', return_eigenvectors=False)
                properties['spectral_eigenvalues'] = sorted(eigenvalues.tolist())
        except Exception:
            pass  # eigsh may fail to converge on some graphs; leave the list empty
    
    # Connectivity characteristics
    properties['connected_components'] = nx.number_connected_components(G)
    largest_cc = max(nx.connected_components(G), key=len)
    properties['largest_cc_ratio'] = len(largest_cc) / len(G.nodes)
    
    return properties
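
# Sketch of the returned dictionary (keys are the ones set above; the values
# here are illustrative, not real output):
#
#   {"mask_distribution": {"train": 0.6, "valid": 0.2, "test": 0.2},
#    "label_distribution": {0: 0.45, 1: 0.55},
#    "density": 0.0012,
#    "degree_distribution": {1: 0.30, 2: 0.25},
#    "community_distribution": {0: 0.4, 1: 0.6},
#    "spectral_eigenvalues": [0.0, 0.03, 0.05],
#    "connected_components": 1,
#    "largest_cc_ratio": 1.0}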

def sample_graph_preserving_properties(G, percentage, original_properties):
    """Sample a percentage of nodes while preserving graph properties."""
    num_nodes = len(G.nodes)
    num_nodes_to_sample = max(1, int(num_nodes * percentage / 100))
    
    # If the graph is too small, just return it
    if num_nodes <= num_nodes_to_sample:
        return G, {n: n for n in G.nodes}
    
    # 1. Preserve label and mask distribution (top priority per requirements)
    mask_label_groups = defaultdict(list)
    for node in G.nodes:
        mask = G.nodes[node]['mask']
        label = G.nodes[node]['label']
        mask_label_groups[(mask, label)].append(node)
    
    # Calculate how many nodes to sample from each mask-label group, using the
    # group's empirical share of the graph; this preserves the joint mask/label
    # distribution (a product of the marginals would wrongly assume independence)
    group_counts = {}
    for (mask, label), group_nodes in mask_label_groups.items():
        joint_ratio = len(group_nodes) / num_nodes
        target_count = int(num_nodes_to_sample * joint_ratio)
        # Ensure at least one node from non-empty groups
        group_counts[(mask, label)] = max(1, target_count) if group_nodes else 0
    
    # Adjust to match the exact sample size
    total_count = sum(group_counts.values())
    if total_count != num_nodes_to_sample:
        diff = num_nodes_to_sample - total_count
        groups = list(group_counts.keys())
        
        if diff > 0:
            # Add nodes to groups proportionally to their size
            group_sizes = [len(mask_label_groups[g]) for g in groups]
            group_probs = [s/sum(group_sizes) for s in group_sizes]
            
            for _ in range(diff):
                group = random.choices(groups, weights=group_probs)[0]
                if len(mask_label_groups[group]) > group_counts[group]:
                    group_counts[group] += 1
        else:
            # Remove nodes from groups with excess
            groups_with_excess = [(g, c) for g, c in group_counts.items() 
                                 if c > 1 and c > len(mask_label_groups[g]) * 0.2]
            groups_with_excess.sort(key=lambda x: x[1], reverse=True)
            
            for i in range(min(-diff, len(groups_with_excess))):
                group_counts[groups_with_excess[i][0]] -= 1
    
    # 2. Sample nodes from each group, prioritizing connectivity and community structure
    sampled_nodes = []
    
    # First try to get community structure
    try:
        communities = community_louvain.best_partition(G)
    except Exception:
        communities = {node: 0 for node in G.nodes}  # Fallback if community detection fails

    # Hoist the maximum degree out of the per-node scoring loop below
    max_degree = max((d for _, d in G.degree()), default=1)
    
    # Sample from each mask-label group
    for (mask, label), count in group_counts.items():
        candidates = mask_label_groups[(mask, label)]
        
        if len(candidates) <= count:
            # Take all nodes in this group
            sampled_nodes.extend(candidates)
        else:
            # Score nodes based on degree and community representation
            node_scores = {}
            for node in candidates:
                # Higher score for higher degree nodes (connectivity)
                degree_score = G.degree(node) / max(1, max_degree)
                
                # Higher score for nodes in underrepresented communities
                comm = communities.get(node, 0)
                comm_sampled = sum(1 for n in sampled_nodes if communities.get(n, -1) == comm)
                comm_total = sum(1 for n in G.nodes if communities.get(n, -1) == comm)
                comm_score = 1 - (comm_sampled / max(1, comm_total))
                
                # Combined score (prioritize connectivity slightly more)
                node_scores[node] = 0.6 * degree_score + 0.4 * comm_score
            
            # Sort candidates by score and select the top ones
            sorted_candidates = sorted(candidates, key=lambda n: node_scores.get(n, 0), reverse=True)
            sampled_nodes.extend(sorted_candidates[:count])
    
    # 3. Create the sampled subgraph
    sampled_G = G.subgraph(sampled_nodes).copy()
    
    # 4. Improve connectivity if needed
    if nx.number_connected_components(sampled_G) > original_properties['connected_components']:
        # Try to improve connectivity by swapping nodes
        non_sampled = [n for n in G.nodes if n not in sampled_nodes]
        
        # For each non-sampled node, count how many components of the sampled
        # subgraph its sampled neighbors span (a bridging count, not true
        # betweenness centrality)
        component_of = {}
        for comp_idx, comp in enumerate(nx.connected_components(sampled_G)):
            for n in comp:
                component_of[n] = comp_idx

        sampled_set = set(sampled_nodes)
        bridge_scores = {}
        for node in non_sampled:
            sampled_neighbors = [n for n in G.neighbors(node) if n in sampled_set]
            if not sampled_neighbors:
                continue
            components_connected = {component_of[n] for n in sampled_neighbors}
            bridge_scores[node] = len(components_connected)

        # Sort non-sampled nodes by how many components they would connect
        connector_nodes = [(n, b) for n, b in bridge_scores.items() if b > 1]
        connector_nodes.sort(key=lambda x: x[1], reverse=True)
        
        # Greedily swap in connector nodes; bridge counts are computed once
        # against the initial subgraph, so this is a one-pass heuristic
        for connector, _ in connector_nodes:
            # Find a node to swap out (prefer low degree nodes from well-represented groups)
            mask = G.nodes[connector]['mask']
            label = G.nodes[connector]['label']
            
            # Find nodes with the same mask and label
            same_group = [n for n in sampled_nodes 
                         if G.nodes[n]['mask'] == mask and G.nodes[n]['label'] == label]
            
            if not same_group:
                continue
                
            # Sort by degree (ascending)
            same_group.sort(key=lambda n: sampled_G.degree(n))
            
            # Swap the node with lowest degree
            to_remove = same_group[0]
            sampled_nodes.remove(to_remove)
            sampled_nodes.append(connector)
            
            # Update the sampled subgraph
            sampled_G = G.subgraph(sampled_nodes).copy()
            
            # Stop if we've reached the desired connectivity
            if nx.number_connected_components(sampled_G) <= original_properties['connected_components']:
                break
    
    # 5. Relabel nodes to have consecutive IDs starting from 0
    node_mapping = {old_id: new_id for new_id, old_id in enumerate(sorted(sampled_nodes))}
    relabeled_G = nx.relabel_nodes(sampled_G, node_mapping)
    
    # Return the sampled graph and the inverse mapping (new_id -> original_id)
    inverse_mapping = {new_id: old_id for old_id, new_id in node_mapping.items()}
    return relabeled_G, inverse_mapping
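
# Example (hypothetical): keep 10% of the nodes, then map a sampled ID back to
# the original graph.
#
#   props = analyze_graph_properties(G)
#   sampled_G, inv = sample_graph_preserving_properties(G, 10, props)
#   original_id = inv[0]  # original node ID of sampled node 0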

def graph_to_json_format(G):
    """Convert a NetworkX graph to the required JSON format."""
    result = []
    
    for node_id in sorted(G.nodes):
        node_data = {
            "node_id": int(node_id),
            "label": G.nodes[node_id]['label'],
            "text": G.nodes[node_id]['text'],
            "neighbors": sorted([int(n) for n in G.neighbors(node_id)]),
            "mask": G.nodes[node_id]['mask']
        }
        
        result.append(node_data)
    
    return result

def sample_text_attribute_graph(input_file, output_file, percentage):
    """Main function to sample a text attribute graph and preserve its properties."""
    # Load the graph data
    print(f"Loading graph from {input_file}...")
    nodes = load_graph_from_json(input_file)
    
    if not nodes:
        print("Failed to load nodes from the input file.")
        return None, None, None
    
    print(f"Loaded {len(nodes)} nodes.")
    
    # Build the NetworkX graph
    print("Building graph...")
    G = build_networkx_graph(nodes)
    print(f"Built graph with {len(G.nodes)} nodes and {len(G.edges)} edges.")
    
    # Analyze the original graph properties
    print("Analyzing original graph properties...")
    original_properties = analyze_graph_properties(G)
    
    # Sample the graph
    print(f"Sampling {percentage}% of the nodes...")
    sampled_G, inverse_mapping = sample_graph_preserving_properties(G, percentage, original_properties)
    print(f"Sampled graph has {len(sampled_G.nodes)} nodes and {len(sampled_G.edges)} edges.")
    
    # Convert the sampled graph to JSON format
    print("Converting sampled graph to JSON format...")
    sampled_data = graph_to_json_format(sampled_G)
    
    # Save the sampled graph
    print(f"Saving sampled graph to {output_file}...")
    with open(output_file, 'w') as f:
        json.dump(sampled_data, f, indent=2)
    
    # Analyze the sampled graph properties
    print("Analyzing sampled graph properties...")
    sampled_properties = analyze_graph_properties(sampled_G)
    
    # Print comparison of original and sampled properties
    print("\nComparison of Graph Properties:")
    print(f"{'Property':<25} {'Original':<15} {'Sampled':<15}")
    print("-" * 55)
    print(f"{'Number of nodes':<25} {len(G.nodes):<15} {len(sampled_G.nodes):<15}")
    print(f"{'Number of edges':<25} {len(G.edges):<15} {len(sampled_G.edges):<15}")
    print(f"{'Density':<25} {original_properties['density']:<15.4f} {sampled_properties['density']:<15.4f}")
    
    print("\nMask Distribution:")
    print(f"{'Mask':<10} {'Original %':<15} {'Sampled %':<15}")
    print("-" * 40)
    for mask in sorted(set(original_properties['mask_distribution'].keys()) | set(sampled_properties['mask_distribution'].keys())):
        orig_pct = original_properties['mask_distribution'].get(mask, 0) * 100
        sampled_pct = sampled_properties['mask_distribution'].get(mask, 0) * 100
        print(f"{mask:<10} {orig_pct:.2f}%{'':>9} {sampled_pct:.2f}%{'':>9}")
    
    print("\nLabel Distribution:")
    print(f"{'Label':<10} {'Original %':<15} {'Sampled %':<15}")
    print("-" * 40)
    for label in sorted(set(original_properties['label_distribution'].keys()) | set(sampled_properties['label_distribution'].keys())):
        orig_pct = original_properties['label_distribution'].get(label, 0) * 100
        sampled_pct = sampled_properties['label_distribution'].get(label, 0) * 100
        print(f"{label:<10} {orig_pct:.2f}%{'':>9} {sampled_pct:.2f}%{'':>9}")
    
    print("\nConnectivity:")
    print(f"Connected components: {original_properties['connected_components']} (original) vs {sampled_properties['connected_components']} (sampled)")
    
    return sampled_G, original_properties, sampled_properties

def main():
    """Command-line interface."""
    if len(sys.argv) != 4:
        print("Usage: python sample_graph.py input_file output_file percentage")
        sys.exit(1)
    
    input_file = sys.argv[1]
    output_file = sys.argv[2]
    try:
        percentage = float(sys.argv[3])
        if percentage <= 0 or percentage > 100:
            raise ValueError("Percentage must be between 0 and 100")
    except ValueError:
        print("Error: Percentage must be a number between 0 and 100")
        sys.exit(1)
    
    sample_text_attribute_graph(input_file, output_file, percentage)

if __name__ == "__main__":
    main()
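
# Example invocation (file names are hypothetical):
#   python sample_graph.py citation_graph.json sampled_10pct.json 10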