Em4e committed
Commit 160f565 · verified · 1 Parent(s): f5e1ae0

Update app.py

Files changed (1)
  1. app.py +131 -602
app.py CHANGED
@@ -1,84 +1,61 @@
- import streamlit as st
- import pandas as pd
- import numpy as np
- import networkit as nk
- import matplotlib.pyplot as plt
- import seaborn as sns
- import plotly.express as px
- import plotly.graph_objects as go
- from plotly.subplots import make_subplots
- import time
- import gc
- from io import StringIO
- import random
- from collections import defaultdict
 
- # Set page config
- st.set_page_config(
-     page_title="Website Link Impact Analyzer",
-     page_icon="🔗",
-     layout="wide",
-     initial_sidebar_state="expanded"
  )
 
- # Global cache for WWW graph
- if 'www_graph_cache' not in st.session_state:
-     st.session_state.www_graph_cache = None
 
- def load_graph_from_csv_networkit(file_content, file_name):
-     """
-     Load page links from CSV file using NetworKit.
-     """
-     try:
-         # Read CSV content
-         df = pd.read_csv(StringIO(file_content))
-
-         # Check required columns with user-friendly names
-         required_cols = ['FROM', 'TO']
-         if not all(col in df.columns for col in required_cols):
-             st.error(f"""
-             ❌ **File Format Error**
-
-             Your CSV file needs these column names:
-             - **FROM** (the page that has the link)
-             - **TO** (the page being linked to)
-
-             Your file has: {', '.join(df.columns)}
-             """)
-             return None, None, None
-
-         # Clean data
-         df = df.dropna(subset=['FROM', 'TO'])
-         df['FROM'] = df['FROM'].astype(str)
-         df['TO'] = df['TO'].astype(str)
-
-         if len(df) == 0:
-             st.error(f"❌ No valid page links found in {file_name}")
-             return None, None, None
-
-         # Get unique nodes and create mapping
-         all_nodes = list(set(df['FROM'].tolist() + df['TO'].tolist()))
-         node_to_idx = {node: i for i, node in enumerate(all_nodes)}
-
-         # Create NetworKit graph
-         G = nk.Graph(n=len(all_nodes), weighted=False, directed=True)
-
-         # Add edges
-         for _, row in df.iterrows():
-             source_idx = node_to_idx[row['FROM']]
-             target_idx = node_to_idx[row['TO']]
-             G.addEdge(source_idx, target_idx)
-
-         return G, all_nodes, node_to_idx
-
-     except Exception as e:
-         st.error(f"❌ **Error reading file**: {str(e)}")
-         st.info("💡 **Tip**: Make sure your file is a valid CSV with FROM and TO columns for page links")
-         return None, None, None
 
  def create_www_graph_networkit(n_nodes, m_edges, seed=42):
      """
-     Create a realistic internet simulation using NetworKit.
      """
      cache_key = (n_nodes, m_edges, seed)
 
@@ -89,546 +66,98 @@ def create_www_graph_networkit(n_nodes, m_edges, seed=42):
      # Set random seed for NetworKit
      nk.setSeed(seed, False)
 
-     # Create Barabási-Albert graph using NetworKit's generator
-     generator = nk.generators.BarabasiAlbertGenerator(k=m_edges, nMax=n_nodes, n0=m_edges)
-     www_graph = generator.generate()
-
-     # Make it directed
-     if not www_graph.isDirected():
-         # Convert to directed by creating a new directed graph
-         directed_graph = nk.Graph(n=www_graph.numberOfNodes(), weighted=False, directed=True)
-         for u, v in www_graph.iterEdges():
-             directed_graph.addEdge(u, v)
-             directed_graph.addEdge(v, u)  # Make bidirectional
-         www_graph = directed_graph
 
      # Cache the result
      st.session_state.www_graph_cache = (cache_key, www_graph)
      return www_graph
 
- def process_configuration_networkit(www_graph, kalicube_graph, kalicube_nodes,
-                                     min_connections=5, max_connections=50):
-     """
-     Test how your page network performs in the real internet using NetworKit.
-     """
-     # Get WWW graph info
-     www_node_count = www_graph.numberOfNodes()
-     kalicube_node_count = len(kalicube_nodes)
-
-     # Create node mapping for kalicube nodes
-     kalicube_offset = www_node_count
-     kalicube_node_mapping = {}
-
-     for i, node in enumerate(kalicube_nodes):
-         new_node_id = kalicube_offset + i
-         kalicube_node_mapping[node] = new_node_id
-
-     # Create merged graph
-     total_nodes = www_node_count + kalicube_node_count
-     merged_graph = nk.Graph(n=total_nodes, weighted=False, directed=True)
-
-     # Add WWW edges
-     for u, v in www_graph.iterEdges():
-         merged_graph.addEdge(u, v)
-
-     # Add kalicube edges with new node IDs
-     kalicube_idx_to_node = {i: node for i, node in enumerate(kalicube_nodes)}
-
-     for u, v in kalicube_graph.iterEdges():
-         source_node = kalicube_idx_to_node[u]
-         target_node = kalicube_idx_to_node[v]
-         new_source_id = kalicube_node_mapping[source_node]
-         new_target_id = kalicube_node_mapping[target_node]
-         merged_graph.addEdge(new_source_id, new_target_id)
-
-     # Randomly connect kalicube pages to WWW
-     n_connections = min(min_connections, www_node_count, kalicube_node_count)
-
-     www_sample = random.sample(range(www_node_count), n_connections)
-     kalicube_sample = random.sample(list(kalicube_node_mapping.values()), n_connections)
-
-     for www_node, kalicube_node in zip(www_sample, kalicube_sample):
-         merged_graph.addEdge(www_node, kalicube_node)
-
-     # Calculate PageRank using NetworKit
-     try:
          pagerank_algo = nk.centrality.PageRank(merged_graph, damp=0.85, tol=1e-6)
-         pagerank_algo.run()
-         pagerank_values = pagerank_algo.scores()
-     except Exception as e:
-         st.warning(f"PageRank calculation failed: {e}. Using degree centrality instead.")
-         # Fallback to degree centrality
-         degree_algo = nk.centrality.DegreeCentrality(merged_graph, normalized=True)
-         degree_algo.run()
-         pagerank_values = degree_algo.scores()
-
-     # Extract PageRank values for kalicube nodes
-     pagerank_dict = {}
-     for node, node_id in kalicube_node_mapping.items():
-         pagerank_dict[node] = pagerank_values[node_id] if node_id < len(pagerank_values) else 0.0
-
-     return pagerank_dict
 
- def create_comparison_dataframe(pagerank_old_dict, pagerank_new_dict, simulation_id):
-     """
-     Compare before and after results.
-     """
-     # Find pages that appear in both tests
-     old_urls = set(pagerank_old_dict.keys())
-     new_urls = set(pagerank_new_dict.keys())
-     common_urls = old_urls & new_urls
-
-     if not common_urls:
-         return pd.DataFrame()
-
-     # Create comparison data
-     comparison_data = []
-
-     # Sort pages by importance for ranking
-     old_sorted = sorted(pagerank_old_dict.items(), key=lambda x: x[1], reverse=True)
-     new_sorted = sorted(pagerank_new_dict.items(), key=lambda x: x[1], reverse=True)
-
-     # Create ranking mappings
-     old_ranks = {url: rank + 1 for rank, (url, _) in enumerate(old_sorted)}
-     new_ranks = {url: rank + 1 for rank, (url, _) in enumerate(new_sorted)}
-
-     for url in common_urls:
-         importance_before = pagerank_old_dict[url]
-         importance_after = pagerank_new_dict[url]
-         rank_before = old_ranks[url]
-         rank_after = new_ranks[url]
-
-         importance_change = importance_after - importance_before
-         importance_change_pct = (importance_change / importance_before) * 100 if importance_before > 0 else 0
-         rank_change = rank_after - rank_before
-         rank_change_pct = (rank_change / rank_before) * 100 if rank_before > 0 else 0
-
-         comparison_data.append({
-             'Page_URL': url,
-             'Importance_Before': importance_before,
-             'Importance_After': importance_after,
-             'Rank_Before': rank_before,
-             'Rank_After': rank_after,
-             'Importance_Change': importance_change,
-             'Importance_Change_%': importance_change_pct,
-             'Rank_Change': rank_change,
-             'Rank_Change_%': rank_change_pct,
-             'Test_Number': simulation_id
-         })
-
-     return pd.DataFrame(comparison_data)
 
- def run_single_simulation(simulation_id, kalicube_graph_old, kalicube_graph_new,
-                           kalicube_nodes_old, kalicube_nodes_new,
-                           www_nodes, www_edges, min_conn, max_conn):
-     """
-     Run one test comparing before and after.
-     """
-     sim_seed = 42 + simulation_id
-     random.seed(sim_seed)
-     np.random.seed(sim_seed)
-
-     # Create internet simulation
-     www_graph = create_www_graph_networkit(www_nodes, www_edges, sim_seed)
-
-     # Test original setup
-     importance_old_dict = process_configuration_networkit(
-         www_graph, kalicube_graph_old, kalicube_nodes_old, min_conn, max_conn
-     )
-
-     # Test new setup
-     importance_new_dict = process_configuration_networkit(
-         www_graph, kalicube_graph_new, kalicube_nodes_new, min_conn, max_conn
-     )
-
-     # Compare results
-     comparison_df = create_comparison_dataframe(
-         importance_old_dict, importance_new_dict, simulation_id
-     )
-
-     if comparison_df.empty:
-         return None, None
-
-     # Calculate summary
-     total_before = comparison_df['Importance_Before'].sum()
-     total_after = comparison_df['Importance_After'].sum()
-     total_change = total_after - total_before
-     change_pct = (total_change / total_before) * 100 if total_before > 0 else 0
-
-     rank_changes = comparison_df['Rank_Change'].values
-     rank_improvements = np.sum(rank_changes < 0)  # Lower rank number = better
-     rank_drops = np.sum(rank_changes > 0)
-     rank_unchanged = np.sum(rank_changes == 0)
-     avg_rank_change = np.mean(rank_changes)
-
-     result = {
-         'Test_Number': simulation_id + 1,
-         'Total_Before': total_before,
-         'Total_After': total_after,
-         'Total_Change': total_change,
-         'Change_Percent': change_pct,
-         'Pages_Improved': rank_improvements,
-         'Pages_Dropped': rank_drops,
-         'Pages_Unchanged': rank_unchanged,
-         'Avg_Rank_Change': avg_rank_change
-     }
-
-     return result, comparison_df
 
- def get_traffic_light_status(results_df, confidence_threshold=0.7):
-     """
-     Simple decision guidance based on test results.
-     """
-     total_tests = len(results_df)
-     positive_outcomes = (results_df['Total_Change'] > 0).sum()
-     negative_outcomes = (results_df['Total_Change'] < 0).sum()
-
-     positive_ratio = positive_outcomes / total_tests
-     negative_ratio = negative_outcomes / total_tests
-     mean_impact = results_df['Change_Percent'].mean()
-
-     # Simple traffic light logic
-     if positive_ratio >= confidence_threshold and mean_impact > 1.0:
-         return "🟢", "✅ GO AHEAD - Your changes look great!", "go", "Most tests show good results. Your changes should help your page rankings."
-     elif positive_ratio >= confidence_threshold and mean_impact > 0:
-         return "🟡", "⚠️ PROCEED CAREFULLY - Small improvements expected", "caution", "Tests show some improvement, but it's modest. Consider if the effort is worth it."
-     elif negative_ratio >= confidence_threshold and mean_impact < -1.0:
-         return "🔴", "❌ STOP - Your changes may hurt your page rankings", "stop", "Most tests show negative results. Consider revising your changes before implementing."
-     elif negative_ratio >= confidence_threshold and mean_impact < 0:
-         return "🟡", "⚠️ PROCEED CAREFULLY - Some negative impact expected", "caution", "Tests show some negative impact. Monitor closely if you proceed."
-     else:
-         return "🟡", "🤷 MIXED RESULTS - Hard to predict", "caution", "Test results are mixed. Consider running more tests or getting expert advice."
 
- def create_simple_visualizations(results_df, all_comparisons_df, confidence_threshold=0.7):
-     """
-     Create easy-to-understand visualizations.
-     """
-     # Traffic Light Assessment
-     traffic_emoji, traffic_status, traffic_level, explanation = get_traffic_light_status(results_df, confidence_threshold)
-
-     st.markdown("## 🚦 **Should You Make These Changes?**")
-
-     # Big, clear recommendation
-     if traffic_level == "go":
-         st.success(f"# {traffic_emoji}")
-         st.success(f"## {traffic_status}")
-         st.info(f"**Why:** {explanation}")
-     elif traffic_level == "stop":
-         st.error(f"# {traffic_emoji}")
-         st.error(f"## {traffic_status}")
-         st.warning(f"**Why:** {explanation}")
-     else:
-         st.warning(f"# {traffic_emoji}")
-         st.warning(f"## {traffic_status}")
-         st.info(f"**Why:** {explanation}")
-
-     # Simple metrics in plain English
-     st.markdown("### 📊 **Test Results Summary**")
-     col1, col2, col3 = st.columns(3)
-
-     with col1:
-         positive_tests = (results_df['Total_Change'] > 0).sum()
-         total_tests = len(results_df)
-         st.metric("Tests Showing Improvement", f"{positive_tests} out of {total_tests}",
-                   delta=f"{positive_tests/total_tests:.0%} positive")
-
-     with col2:
-         mean_change = results_df['Change_Percent'].mean()
-         st.metric("Average Impact on Rankings", f"{mean_change:.1f}%",
-                   delta="Higher is better")
-
-     with col3:
-         improved_sites = results_df['Pages_Improved'].mean()
-         st.metric("Pages That Improved (avg)", f"{improved_sites:.0f}",
-                   delta="per test")
 
346
- st.title("🔗 Page Link Impact Analyzer (Powered by NetworKit)")
347
- st.markdown("**Find out if your page link changes will help or hurt your search rankings**")
348
-
349
- # Simple intro
350
- st.info("""
351
- 👋 **Welcome!** This tool helps you test page link changes before you make them.
352
-
353
- **What it does:** Simulates how your link changes might affect your page rankings in search engines.
354
-
355
- **What you need:** Two CSV files - one with your current page links, one with your planned changes.
356
-
357
- ⚡ **Now powered by NetworKit** - A high-performance network analysis toolkit for faster and more efficient analysis!
358
- """)
359
-
360
- # Sidebar - simplified
361
- st.sidebar.header("⚙️ Settings")
362
-
363
- # File uploads with better guidance
364
- st.sidebar.markdown("### 📁 **Step 1: Upload Your Files**")
365
- st.sidebar.markdown("*Need help with file format? Check the 'File Format Help' section below.*")
366
-
367
- old_file = st.sidebar.file_uploader("Current Page Links (CSV)", type=['csv'], key="old",
368
- help="Upload a CSV file with your current page links")
369
- new_file = st.sidebar.file_uploader("Planned Page Links (CSV)", type=['csv'], key="new",
370
- help="Upload a CSV file with your planned page links")
371
-
372
- # Simplified settings
373
- st.sidebar.markdown("### 🎯 **Step 2: Test Settings**")
374
-
375
- num_tests = st.sidebar.select_slider(
376
- "How many tests to run?",
377
- options=[5, 10, 15, 20, 25, 30],
378
- value=10,
379
- help="More tests = more reliable results, but takes longer"
380
- )
381
-
382
- internet_size = st.sidebar.select_slider(
383
- "Internet simulation size",
384
- options=["Small (5K sites)", "Medium (10K sites)", "Large (25K sites)", "Huge (50K sites)"],
385
- value="Medium (10K sites)",
386
- help="Larger = more realistic but slower"
387
- )
388
-
389
- # Convert internet size to numbers
390
- size_map = {
391
- "Small (5K sites)": 5000,
392
- "Medium (10K sites)": 10000,
393
- "Large (25K sites)": 25000,
394
- "Huge (50K sites)": 50000
395
- }
396
- www_nodes = size_map[internet_size]
397
-
398
- # Advanced settings (hidden by default)
399
- with st.sidebar.expander("🔧 Advanced Settings (Optional)"):
400
- confidence_level = st.slider("Confidence level for recommendations", 60, 90, 70, 5,
401
- help="Higher = stricter requirements for green/red lights")
402
- show_details = st.checkbox("Show detailed results", False)
403
- auto_run = st.checkbox("Auto-run when files uploaded", False)
404
-
405
- confidence_threshold = confidence_level / 100
406
-
407
- # Main content
408
- if old_file is not None and new_file is not None:
409
- # Load files
410
- old_content = old_file.getvalue().decode('utf-8')
411
- new_content = new_file.getvalue().decode('utf-8')
412
-
413
- # Show file status
414
- col1, col2 = st.columns(2)
415
- with col1:
416
- st.success(f"✅ **Current Page Links**: {old_file.name}")
417
- with col2:
418
- st.success(f"✅ **Planned Page Links**: {new_file.name}")
419
-
420
- # Load and validate files
421
- with st.spinner("Reading your files..."):
422
- kalicube_graph_old, kalicube_nodes_old, kalicube_url_mapping_old = \
423
- load_graph_from_csv_networkit(old_content, old_file.name)
424
-
425
- kalicube_graph_new, kalicube_nodes_new, kalicube_url_mapping_new = \
426
- load_graph_from_csv_networkit(new_content, new_file.name)
427
-
428
- if kalicube_graph_old is not None and kalicube_graph_new is not None:
429
- # Show what we found
430
- st.markdown("### 📈 **What We Found in Your Files**")
431
- info_col1, info_col2 = st.columns(2)
432
-
433
- with info_col1:
434
- st.info(f"""
435
- **Current Setup:**
436
- - {len(kalicube_nodes_old)} pages
437
- - {kalicube_graph_old.numberOfEdges()} links between them
438
- """)
439
-
440
- with info_col2:
441
- st.info(f"""
442
- **Planned Setup:**
443
- - {len(kalicube_nodes_new)} pages
444
- - {kalicube_graph_new.numberOfEdges()} links between them
445
- """)
446
-
447
- # Big, obvious run button
448
- st.markdown("### 🚀 **Step 3: Run the Test**")
449
-
450
- run_button = st.button("🔬 Test My Changes", type="primary", use_container_width=True)
451
-
452
- if run_button or auto_run:
453
- # Progress with encouraging messages
454
- progress_bar = st.progress(0)
455
- status_text = st.empty()
456
-
457
- encouraging_messages = [
458
- "🔬 Setting up internet simulation...",
459
- "🌐 Connecting your pages to the web...",
460
- "📊 Calculating page importance scores...",
461
- "🎯 Running tests with different scenarios...",
462
- "📈 Almost done! Analyzing results..."
463
- ]
464
-
465
- all_results = []
466
- all_comparisons = []
467
-
468
- start_time = time.time()
469
-
470
- # Run tests with encouragement
471
- for i in range(num_tests):
472
- msg_idx = min(i // max(1, num_tests // len(encouraging_messages)), len(encouraging_messages) - 1)
473
- status_text.text(f"{encouraging_messages[msg_idx]} (Test {i+1}/{num_tests})")
474
- progress_bar.progress((i + 1) / num_tests)
475
-
476
- result, comparison_df = run_single_simulation(
477
- i, kalicube_graph_old, kalicube_graph_new,
478
- kalicube_nodes_old, kalicube_nodes_new,
479
- www_nodes, 2, 5, 25 # simplified parameters
480
- )
481
-
482
- if result is not None:
483
- all_results.append(result)
484
- all_comparisons.append(comparison_df)
485
-
486
- end_time = time.time()
487
-
488
- # Clear progress
489
- progress_bar.empty()
490
- status_text.empty()
491
-
492
- if all_results:
493
- results_df = pd.DataFrame(all_results)
494
- all_comparisons_df = pd.concat(all_comparisons, ignore_index=True) if all_comparisons else pd.DataFrame()
495
-
496
- # Show results
497
- st.success(f"🎉 **Test Complete!** Ran {len(all_results)} tests in {end_time - start_time:.0f} seconds")
498
-
499
- # Create simple visualizations
500
- create_simple_visualizations(results_df, all_comparisons_df, confidence_threshold)
501
-
502
- # Download section
503
- st.markdown("### 💾 **Save Your Results**")
504
- col1, col2 = st.columns(2)
505
-
506
- with col1:
507
- csv_summary = results_df.to_csv(index=False)
508
- st.download_button(
509
- label="📊 Download Summary Report",
510
- data=csv_summary,
511
- file_name=f"website_impact_summary_{int(time.time())}.csv",
512
- mime="text/csv"
513
- )
514
-
515
- with col2:
516
- if not all_comparisons_df.empty:
517
- csv_detailed = all_comparisons_df.to_csv(index=False)
518
- st.download_button(
519
- label="📋 Download Detailed Results",
520
- data=csv_detailed,
521
- file_name=f"website_impact_detailed_{int(time.time())}.csv",
522
- mime="text/csv"
523
- )
524
-
525
- # Show detailed results if requested
526
- if show_details and not all_comparisons_df.empty:
527
- st.markdown("### 🔍 **Detailed Results** (For the curious)")
528
-
529
- # Simple filter
530
- st.markdown("**Filter results:**")
531
- filter_col1, filter_col2 = st.columns(2)
532
- with filter_col1:
533
- min_change = st.number_input("Show changes above (%)",
534
- value=float(all_comparisons_df['Importance_Change_%'].min()),
535
- step=0.1)
536
-
537
- # Apply filter and show
538
- filtered_df = all_comparisons_df[all_comparisons_df['Importance_Change_%'] >= min_change]
539
-
540
- # Rename columns for clarity
541
- display_df = filtered_df.copy()
542
- display_df = display_df.rename(columns={
543
- 'Page_URL': 'Page URL',
544
- 'Importance_Change_%': 'Impact (%)',
545
- 'Rank_Change': 'Rank Change',
546
- 'Test_Number': 'Test #'
547
- })
548
-
549
- st.dataframe(
550
- display_df[['Page URL', 'Impact (%)', 'Rank_Change', 'Test #']].sort_values('Impact (%)', ascending=False),
551
- use_container_width=True,
552
- height=300
553
- )
554
-
555
- else:
556
- st.error("❌ No test results generated. Please check your files and try again.")
557
-
558
- else:
559
- # Help section when no files uploaded
560
- st.markdown("---")
561
-
562
- # File format help
563
- with st.expander("📋 **File Format Help** - How to prepare your CSV files"):
564
- st.markdown("""
565
- ### ✅ **Correct Format**
566
- Your CSV files need exactly these column names:
567
- - **FROM** = the page that has the link
568
- - **TO** = the page being linked to
569
-
570
- ### 📝 **Example:**
571
- ```
572
- FROM,TO
573
- mysite.com/about,mysite.com/contact
574
- mysite.com/blog/post1,partner.com/resource
575
- partner.com/page,mysite.com/services
576
- ```
577
-
578
- ### 💡 **Tips:**
579
- - Use any spreadsheet program (Excel, Google Sheets) to create these
580
- - Save as CSV format
581
- - Include full URLs or page paths
582
- - Make sure page URLs are consistent (mysite.com/page vs mysite.com/page/ are different!)
583
- - Each row represents one link from one page to another
584
- """)
585
-
586
- with st.expander("🤔 **What This Tool Actually Does** - Explained Simply"):
587
- st.markdown("""
588
- ### 🌐 **The Big Picture**
589
- When you change links between your pages, it affects how search engines see your site. But it's hard to predict the exact impact because the internet is huge and constantly changing.
590
-
591
- ### 🧪 **Our Solution: Virtual Testing**
592
- 1. **We simulate the internet** - Create a virtual version with thousands of pages
593
- 2. **We test your changes** - Run your current page links vs. your planned links
594
- 3. **We repeat many times** - Each test uses slightly different internet conditions
595
- 4. **We analyze the pattern** - Look at whether your changes usually help or hurt
596
-
597
- ### 🚦 **The Traffic Light System**
598
- - **🟢 Green = Go ahead** - Most tests show your changes help
599
- - **🟡 Yellow = Be careful** - Mixed results or small impact
600
- - **🔴 Red = Stop** - Most tests show your changes hurt
601
-
602
- ### 🎯 **Why This Works**
603
- Instead of guessing, you get data-driven confidence about your page link changes!
604
-
605
- ### ⚡ **Powered by NetworKit**
606
- This version uses NetworKit, a high-performance network analysis toolkit that's much faster than traditional tools for analyzing large networks.
607
- """)
608
-
609
- with st.expander("❓ **Common Questions**"):
610
- st.markdown("""
611
- **Q: How accurate is this?**
612
- A: The tool shows trends and probabilities, not exact predictions. It's like weather forecasting - very useful for planning!
613
-
614
- **Q: How long does it take?**
615
- A: Usually 30 seconds to 2 minutes, depending on your settings. NetworKit makes it faster than before!
616
-
617
- **Q: What if I get yellow results?**
618
- A: Yellow means proceed carefully. Consider running more tests, getting expert advice, or monitoring closely if you implement.
619
-
620
- **Q: Can I test multiple scenarios?**
621
- A: Yes! Just upload different "planned changes" files to compare options.
622
-
623
- **Q: What file size limits?**
624
- A: Works best with up to 10,000 page links. Larger files may be slow.
625
-
626
- **Q: What's the difference between pages and websites?**
627
- A: Pages are specific URLs (like mysite.com/about), while websites are domains (like mysite.com). This tool analyzes individual page links.
628
-
629
- **Q: What's NetworKit?**
630
- A: NetworKit is a high-performance network analysis toolkit with optimized C++ algorithms that makes calculations much faster and can handle larger datasets more efficiently.
631
- """)
632
 
633
- if __name__ == "__main__":
-     main()
 
+ # 1. MODIFY THE SIDEBAR INTERNET SIZE SELECTOR
+ # Replace this section in the main() function:
+
+ st.sidebar.markdown("### 🎯 **Step 2: Test Settings**")
+
+ num_tests = st.sidebar.select_slider(
+     "How many tests to run?",
+     options=[5, 10, 15, 20, 25, 30],
+     value=10,
+     help="More tests = more reliable results, but takes longer"
  )
 
+ # CHANGE THIS PART:
+ internet_size = st.sidebar.select_slider(
+     "Internet simulation size",
+     options=[
+         "Large (100K sites)",
+         "Very Large (250K sites)",
+         "Huge (500K sites)",
+         "Massive (750K sites)",
+         "Ultra (1M sites)"
+     ],
+     value="Large (100K sites)",
+     help="Larger = more realistic but much slower. WARNING: 500K+ may take several minutes per test!"
+ )
+
+ # Convert internet size to numbers
+ size_map = {
+     "Large (100K sites)": 100000,
+     "Very Large (250K sites)": 250000,
+     "Huge (500K sites)": 500000,
+     "Massive (750K sites)": 750000,
+     "Ultra (1M sites)": 1000000
+ }
+ www_nodes = size_map[internet_size]
+
+ # 2. ADD PERFORMANCE WARNINGS
+ # Add this right after the internet size selector:
+
+ if www_nodes >= 500000:
+     st.sidebar.warning(f"""
+     ⚠️ **Performance Warning**:
+     {internet_size} will be very slow!
+     Expect 2-10 minutes per test.
+     Consider using fewer tests.
+     """)
+ elif www_nodes >= 250000:
+     st.sidebar.info(f"""
+     ℹ️ **Note**: {internet_size} may take
+     30-60 seconds per test.
+     """)
+
+ # 3. OPTIMIZE THE WWW GRAPH CREATION FUNCTION
+ # Replace the create_www_graph_networkit function with this optimized version:
 
  def create_www_graph_networkit(n_nodes, m_edges, seed=42):
      """
+     Create a realistic internet simulation using NetworKit - optimized for large graphs.
      """
      cache_key = (n_nodes, m_edges, seed)
 
      # Set random seed for NetworKit
      nk.setSeed(seed, False)
 
+     # For very large graphs, use different approach
+     if n_nodes >= 500000:
+         # Use R-MAT generator for very large graphs (faster)
+         generator = nk.generators.RmatGenerator(
+             scale=int(np.log2(n_nodes)),
+             edgeFactor=m_edges,
+             a=0.57, b=0.19, c=0.19, d=0.05
+         )
+         www_graph = generator.generate()
+
+         # Convert to directed if needed
+         if not www_graph.isDirected():
+             directed_graph = nk.Graph(n=www_graph.numberOfNodes(), weighted=False, directed=True)
+             for u, v in www_graph.iterEdges():
+                 directed_graph.addEdge(u, v)
+             www_graph = directed_graph
+     else:
+         # Use Barabási-Albert for smaller graphs
+         generator = nk.generators.BarabasiAlbertGenerator(k=m_edges, nMax=n_nodes, n0=m_edges)
+         www_graph = generator.generate()
+
+         # Make it directed
+         if not www_graph.isDirected():
+             directed_graph = nk.Graph(n=www_graph.numberOfNodes(), weighted=False, directed=True)
+             for u, v in www_graph.iterEdges():
+                 directed_graph.addEdge(u, v)
+                 directed_graph.addEdge(v, u)  # Make bidirectional
+             www_graph = directed_graph
 
      # Cache the result
      st.session_state.www_graph_cache = (cache_key, www_graph)
      return www_graph
 
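One detail of the R-MAT branch above is worth sanity-checking (a sketch, under the assumption that NetworKit's `RmatGenerator` builds a graph with 2**scale nodes and roughly `edgeFactor * 2**scale` edges): because `scale = int(np.log2(n_nodes))` truncates, the generated web can be noticeably smaller than the size chosen in the sidebar.

```
# Sketch only: estimate what the R-MAT branch would actually generate for a requested size.
# Assumption: RmatGenerator produces 2**scale nodes and about edgeFactor * 2**scale edges.
import numpy as np

def rmat_actual_size(n_nodes, m_edges):
    """Node/edge counts the R-MAT branch would produce for a requested n_nodes."""
    scale = int(np.log2(n_nodes))   # truncates, e.g. 750_000 -> 19
    n = 2 ** scale                  # 524_288 nodes rather than 750_000
    return n, n * m_edges

print(rmat_actual_size(750_000, 2))  # (524288, 1048576)
```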
+ # 4. OPTIMIZE THE PAGERANK CALCULATION
+ # Replace the PageRank section in process_configuration_networkit with:
+
+ # Calculate PageRank using NetworKit with optimized settings for large graphs
+ try:
+     if total_nodes >= 500000:
+         # Use more relaxed tolerance for very large graphs
+         pagerank_algo = nk.centrality.PageRank(merged_graph, damp=0.85, tol=1e-4)
+     else:
          pagerank_algo = nk.centrality.PageRank(merged_graph, damp=0.85, tol=1e-6)
+     pagerank_algo.run()
+     pagerank_values = pagerank_algo.scores()
+ except Exception as e:
+     st.warning(f"PageRank calculation failed: {e}. Using degree centrality instead.")
+     # Fallback to degree centrality
+     degree_algo = nk.centrality.DegreeCentrality(merged_graph, normalized=True)
+     degree_algo.run()
+     pagerank_values = degree_algo.scores()
 
+ # 5. ADD MEMORY MANAGEMENT
+ # Add this function and call it after each simulation:
+
+ def cleanup_memory():
+     """Clean up memory after large graph operations."""
+     gc.collect()  # Force garbage collection
+
+ # Call this in run_single_simulation after processing:
+ # ... existing simulation code ...
+ cleanup_memory()  # Add this line before returning results
 
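To make that placement concrete, here is a minimal sketch of how the tail of `run_single_simulation` (from the previous version of app.py) would look with the call added; the bodies are illustrative stand-ins, not the real computation.

```
import gc

def cleanup_memory():
    """Clean up memory after large graph operations."""
    gc.collect()

def run_single_simulation(simulation_id):
    # ... stand-in for building the WWW graph and scoring both configurations ...
    result = {"Test_Number": simulation_id + 1}   # placeholder summary dict
    comparison_df = None                          # placeholder comparison table
    cleanup_memory()                              # release large merged graphs before returning
    return result, comparison_df
```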
+ # 6. MODIFY THE PROGRESS MESSAGES FOR LARGE SIMULATIONS
+ # Update the encouraging_messages list in main():
+
+ if www_nodes >= 500000:
+     encouraging_messages = [
+         f"🔬 Creating massive internet simulation ({www_nodes:,} sites)... This will take a while!",
+         "🌐 Building ultra-large network topology...",
+         "📊 Computing importance scores for massive network...",
+         "🎯 Running test with millions of connections...",
+         "📈 Almost there! Processing final calculations..."
+     ]
+ else:
+     encouraging_messages = [
+         f"🔬 Setting up large internet simulation ({www_nodes:,} sites)...",
+         "🌐 Connecting your pages to the web...",
+         "📊 Calculating page importance scores...",
+         "🎯 Running tests with different scenarios...",
+         "📈 Almost done! Analyzing results..."
+     ]
 
+ # 7. ADD OPTION FOR REDUCED TESTS ON LARGE SIMULATIONS
+ # Add this logic before the test running section:
+
+ # Automatically reduce tests for very large simulations
+ if www_nodes >= 750000 and num_tests > 10:
+     st.warning(f"⚠️ Automatically reducing tests from {num_tests} to 10 for {internet_size} to prevent timeout.")
+     num_tests = min(num_tests, 10)
+ elif www_nodes >= 500000 and num_tests > 15:
+     st.warning(f"⚠️ Automatically reducing tests from {num_tests} to 15 for {internet_size}.")
+     num_tests = min(num_tests, 15)
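The same capping rule, pulled out as a small pure function, makes the thresholds easy to check outside Streamlit (a sketch for illustration only; `capped_num_tests` is not part of the commit).

```
def capped_num_tests(www_nodes, num_tests):
    """Apply the large-simulation caps from section 7."""
    if www_nodes >= 750_000:
        return min(num_tests, 10)
    if www_nodes >= 500_000:
        return min(num_tests, 15)
    return num_tests

assert capped_num_tests(1_000_000, 30) == 10
assert capped_num_tests(500_000, 30) == 15
assert capped_num_tests(100_000, 30) == 30
```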