# Spaces: Sleeping
# (Hugging Face Spaces status banner captured with this paste — not part of the app code.)
| import streamlit as st | |
| import numpy as np | |
| import torch | |
| import matplotlib.pyplot as plt | |
| from torch import nn | |
| from torch.optim import SGD | |
| from torch.nn import CrossEntropyLoss | |
| from scipy.special import softmax | |
| from scipy.stats import entropy | |
| from sklearn.model_selection import train_test_split | |
| from sklearn.metrics import accuracy_score | |
| # --- Core Classes & Functions --- | |
class Branch:
    """One agent/substrate participating in orchestration.

    Plain data holder; `orchestrate` reads all four attributes and
    rewrites `v` in place each round.

    Attributes:
        state: probability distribution over the agent's parameter space
            (here: softmax of the flattened model parameters).
        r: capacity/rate term (here: the client's local sample count).
        H: Shannon entropy of `state`.
        v: the agent's raw parameter vector ("velocity" in the patent text).
    """

    def __init__(self, state, r, H, v):
        self.state = state
        self.v = v
        self.r = r
        self.H = H
def orchestrate(branches, V_s=1.0, epsilon=1e-10, A=1.0, alpha=0.9):
    """Coordinate a set of branches via the perpendicular-KL scheme.

    For every pair of branches a "perpendicular divergence" is computed:
    KL(state_i || state_j) scaled by (1 - |cos theta|) between their unit
    parameter directions, so aligned branches contribute ~0.  Each branch's
    vector is then mixed with weighted projections of itself onto its peers'
    directions, paced per-branch by an entropy-weighted delta_t.

    Args:
        branches: list of Branch objects exposing state, r, H and v.
        V_s: scale for the temporal modulation term.
        epsilon: small constant guarding divisions and logarithms.
        A: upper clamp applied to every branch's delta_t.
        alpha: mixing factor; the updated v keeps alpha of the original.

    Returns:
        The same list, with each branch.v updated in place.

    Side effects:
        Writes two summary metrics to the Streamlit page via st.write.
    """
    n = len(branches)

    # Unit direction of each branch's parameter vector (norm guarded by epsilon).
    Q = [b.v / (np.linalg.norm(b.v) + epsilon) for b in branches]

    # Pairwise perpendicular divergence matrix (symmetric, zero diagonal).
    D_perp = np.zeros((n, n))
    cos_vals = []
    for i in range(n):
        for j in range(i + 1, n):
            cos_theta = np.abs(np.dot(Q[i], Q[j]))
            p_i, p_j = branches[i].state, branches[j].state
            kl = np.sum(p_i * np.log(p_i / (p_j + epsilon) + epsilon))
            D_perp[i, j] = D_perp[j, i] = kl * (1 - cos_theta)
            cos_vals.append(cos_theta)

    avg_dperp = np.mean(D_perp) if np.any(D_perp > 0) else 0.0
    avg_cos = np.mean(cos_vals) if cos_vals else 0.0
    st.write(f"Avg Perp Divergence: {avg_dperp:.6f}")
    st.write(f"Avg |cos θ|: {avg_cos:.4f} (lower = more orthogonal)")

    # Entropy-weighted temporal modulation, clamped at A.
    delta_t = np.array([V_s / (b.r + epsilon) * np.exp(b.H) for b in branches])
    delta_t = np.minimum(delta_t, A)

    # Row-normalized inverse-divergence weights.  BUG FIX: the previous
    # version flattened all rows into one list (dropping zero entries) and
    # re-read it from index 0 for every branch, so every branch consumed
    # branch 0's weights and indices could desynchronize.  Keeping the full
    # matrix lets the update below address weight W[i, j] directly.
    W = np.zeros((n, n))
    for i in range(n):
        row = 1 / (D_perp[i] + epsilon)
        row[i] = 0.0
        row_sum = np.sum(row)
        W[i] = row / row_sum if row_sum > 0 else row

    # Mix each vector with weighted projections onto its peers' directions.
    V_new = []
    for i, b in enumerate(branches):
        influence_sum = np.zeros_like(b.v)
        for j in range(n):
            if i == j:
                continue
            # Projection of v_i onto the unit direction of branch j;
            # equivalent to np.outer(Q[j], Q[j]) @ v_i without forming the matrix.
            projected_v = Q[j] * np.dot(Q[j], b.v)
            influence_sum += W[i, j] * delta_t[j] * projected_v
        V_new.append(alpha * b.v + (1 - alpha) * influence_sum)

    for branch, v in zip(branches, V_new):
        branch.v = v
    return branches
class SimpleModel(nn.Module):
    """Minimal linear (logistic-regression style) classifier.

    A single fully connected layer mapping `input_dim` features to
    `num_classes` raw logits; no activation is applied here because
    CrossEntropyLoss expects unnormalized scores.
    """

    def __init__(self, input_dim, num_classes):
        super().__init__()
        # Kept under the name `linear` — the branch<->model converters
        # read and write `model.linear.weight` / `model.linear.bias`.
        self.linear = nn.Linear(input_dim, num_classes)

    def forward(self, x):
        logits = self.linear(x)
        return logits
def train_local(model, X, y, epochs=5):
    """Fit *model* on one client's local shard with full-batch SGD.

    Mutates the model's parameters in place; returns None.

    Args:
        model: any nn.Module mapping float features to class logits.
        X: numpy feature matrix, shape (n_samples, input_dim).
        y: numpy integer label vector, shape (n_samples,).
        epochs: number of full-batch gradient steps to take.
    """
    features = torch.from_numpy(X).float()
    targets = torch.from_numpy(y).long()
    optimizer = SGD(model.parameters(), lr=0.01)
    loss_fn = CrossEntropyLoss()
    for _ in range(epochs):
        optimizer.zero_grad()
        loss_fn(model(features), targets).backward()
        optimizer.step()
def model_to_branch(model, r):
    """Snapshot a model's parameters as a Branch.

    The flat parameter vector becomes the branch's v; its softmax serves
    as the probability state, whose Shannon entropy becomes H.

    Args:
        model: torch module; parameters are flattened in iteration order.
        r: capacity term for the branch (here: the local sample count).

    Returns:
        A new Branch wrapping the current parameter snapshot.
    """
    flat = np.concatenate([p.detach().numpy().ravel() for p in model.parameters()])
    probs = softmax(flat)
    return Branch(probs, r, entropy(probs), flat)
def branch_to_model(branch, model, input_dim, num_classes):
    """Write a branch's flat parameter vector back into a SimpleModel.

    Layout assumption: all leading entries are the linear weight
    (row-major, num_classes x input_dim) and the trailing num_classes
    entries are the bias — the order `model_to_branch` produced them in
    for a single nn.Linear.  Mutates the model in place; returns None.
    """
    flat = branch.v
    w_part = flat[:-num_classes]
    b_part = flat[-num_classes:]
    model.linear.weight.data = torch.from_numpy(w_part).float().reshape(num_classes, input_dim)
    model.linear.bias.data = torch.from_numpy(b_part).float()
def evaluate(model, X_test, y_test):
    """Return test-set classification accuracy in [0, 1].

    Runs one forward pass under no_grad, takes the argmax class per row
    and compares with the labels.  Computed directly with numpy instead
    of sklearn.metrics.accuracy_score — identical value, one fewer
    dependency for this one-line metric.

    Args:
        model: torch module producing (n_samples, num_classes) logits.
        X_test: numpy feature matrix.
        y_test: numpy integer label vector.
    """
    with torch.no_grad():
        out = model(torch.from_numpy(X_test).float())
        pred = out.argmax(dim=1).numpy()
    return float(np.mean(pred == y_test))
| # --- App Layout --- | |
# Streamlit page setup — must run before any other st.* call in the script.
st.set_page_config(page_title="Perpendicular Orchestration Demo", layout="wide")
# Patent abstract rendered at the top of the page.
st.markdown("""
# Perpendicular Orchestration Demo (Patent Pending)
**Abstract**
Heterogeneous computational substrates—human, synthetic, or hybrid—struggle to coordinate decisions without losing structural independence or contextual fidelity. Disclosed herein are systems and methods for orchestrating such choices using a **Perpendicular Kullback–Leibler Divergence Metric**, which couples probabilistic dissimilarity with geometric orthogonality to measure independence between agents. A complementary **entropy-weighted temporal modulation** mechanism ensures equitable pacing among substrates of differing capacity or uncertainty. Together, these enable coherent, privacy-preserving, and autonomy-respecting coordination across distributed systems. The framework applies to command-and-control networks, identity management, affective media hygiene, and hybrid intelligence architectures.
**Inventor**: Juan Carlos Paredes
**Email**: cpt66778811@gmail.com
""")
st.markdown("---")
# Two demo tabs: a live federated-learning simulation and a static palette illustration.
tab1, tab2 = st.tabs(["Federated Learning Coordination", "Color Palette Demo"])
with tab1:
    st.header("Federated Learning Coordination Demo")
    st.write("Live simulation of patent method vs FedAvg baseline on synthetic data.")
    # Sidebar controls for the simulation hyper-parameters.
    st.sidebar.header("Parameters")
    alpha = st.sidebar.slider("Alpha (mixing strength)", 0.5, 1.0, 0.9, 0.05)
    epochs = st.sidebar.slider("Local epochs per round", 1, 20, 5)
    rounds = st.sidebar.slider("Coordination rounds", 1, 10, 5)
    # NOTE(review): "Mild" currently behaves identically to "IID (easy)" —
    # only "Extreme (90/10)" changes the split logic below. Confirm intent.
    skew = st.sidebar.selectbox("Data skew", ["IID (easy)", "Mild", "Extreme (90/10)"])
    if st.sidebar.button("Run Simulation"):
        with st.spinner("Running..."):
            # Synthetic data generation; fixed seed keeps the demo reproducible.
            rng = np.random.RandomState(42)
            input_dim = 10
            num_classes = 2
            n_samples = 300
            n_clients = 3
            # Base data: label is the sign of the sum of the first 5 features.
            X = rng.randn(n_samples, input_dim)
            y = (np.sum(X[:, :5], axis=1) > 0).astype(int)
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
            # Skew split: hand 80 samples to each of 3 clients.
            if skew == "Extreme (90/10)":
                pos_mask = y_train == 1
                neg_mask = y_train == 0
                pos_idx = np.where(pos_mask)[0]
                neg_idx = np.where(neg_mask)[0]
                # Client 0: 90% positive (72 pos + 8 neg).
                client0_idx = np.concatenate([pos_idx[:72], neg_idx[:8]])
                # Client 1: 90% negative (72 neg + 8 pos).
                remaining_neg = neg_idx[8:]
                client1_idx = np.concatenate([remaining_neg[:72], pos_idx[72:80]])
                # Client 2: whatever indices are left over.
                # NOTE(review): assumes the 240-sample train split has at least
                # 80 positives and 80 negatives — holds for this fixed seed;
                # re-verify if n_samples or the seed changes.
                remaining = np.setdiff1d(np.arange(len(X_train)), np.concatenate([client0_idx, client1_idx]))
                client2_idx = remaining[:80]
                client_data = [X_train[client0_idx], X_train[client1_idx], X_train[client2_idx]]
                client_labels = [y_train[client0_idx], y_train[client1_idx], y_train[client2_idx]]
            else:
                # IID (and "Mild"): even contiguous split across the clients.
                client_data = np.array_split(X_train, n_clients)
                client_labels = np.array_split(y_train, n_clients)
            # Patent method: local training + perpendicular orchestration per round.
            st.subheader("Your Method")
            models = [SimpleModel(input_dim, num_classes) for _ in range(n_clients)]
            your_acc = []  # mean client test accuracy, one entry per round
            your_cos = []  # mean pairwise |cos| of client parameter vectors, per round
            for r in range(rounds):
                # Each client trains on its private shard.
                for i in range(n_clients):
                    train_local(models[i], client_data[i], client_labels[i], epochs=epochs)
                # Wrap models as branches (r = local sample count) and coordinate.
                branches = [model_to_branch(models[i], len(client_data[i])) for i in range(n_clients)]
                branches = orchestrate(branches, alpha=alpha)
                # Push the coordinated parameters back into each client model.
                for i in range(n_clients):
                    branch_to_model(branches[i], models[i], input_dim, num_classes)
                accs = [evaluate(m, X_test, y_test) for m in models]
                avg_acc = np.mean(accs)
                your_acc.append(avg_acc)
                # Orthogonality of the post-orchestration parameter vectors.
                cos_list = []
                for i in range(n_clients):
                    for j in range(i+1, n_clients):
                        norm_i = np.linalg.norm(branches[i].v) + 1e-10
                        norm_j = np.linalg.norm(branches[j].v) + 1e-10
                        cos = np.abs(np.dot(branches[i].v / norm_i, branches[j].v / norm_j))
                        cos_list.append(cos)
                your_cos.append(np.mean(cos_list))
            # Plot the accuracy and orthogonality trajectories over the rounds.
            fig, ax = plt.subplots(1, 2, figsize=(12, 5))
            ax[0].plot(range(1, rounds+1), your_acc, marker='o', color='blue')
            ax[0].set_title("Your Method Accuracy")
            ax[1].plot(range(1, rounds+1), your_cos, marker='o', color='green')
            ax[1].set_title("Your Method cos θ (Orthogonality)")
            st.pyplot(fig)
            # FedAvg baseline is a placeholder: only the header is rendered here.
            st.subheader("FedAvg Baseline")
            # (paste FedAvg code here — same as before)
with tab2:
    st.header("Color Palette Demo: Averaging Destroys Meaning")
    st.write("3 agents with distinct palettes. Simple averaging = mud. Orchestration = vivid blend.")
    # Static colored boxes rendered as raw HTML divs (reliable in Streamlit).
    # NOTE(review): every hex value below is hard-coded for illustration —
    # none of these colors are produced by orchestrate().
    col1, col2, col3 = st.columns(3)
    with col1:
        st.markdown("**Initial Palettes**")
        st.markdown('<div style="background-color:#FF0000;width:100px;height:100px;"></div>', unsafe_allow_html=True)  # Red
        st.markdown('<div style="background-color:#FF8C00;width:100px;height:100px;"></div>', unsafe_allow_html=True)  # Orange
        st.markdown('<div style="background-color:#0000FF;width:100px;height:100px;"></div>', unsafe_allow_html=True)  # Blue
        st.markdown('<div style="background-color:#00FFFF;width:100px;height:100px;"></div>', unsafe_allow_html=True)  # Cyan
        st.markdown('<div style="background-color:#808080;width:100px;height:100px;"></div>', unsafe_allow_html=True)  # Gray
    with col2:
        st.markdown("**Simple Averaged (Mud)**")
        mud_color = "#808060"  # Grayish mud from averaging
        # All five boxes collapse to the same muddy color.
        for _ in range(5):
            st.markdown(f'<div style="background-color:{mud_color};width:100px;height:100px;"></div>', unsafe_allow_html=True)
    with col3:
        st.markdown("**Orchestrated (Vivid Blend)**")
        st.markdown('<div style="background-color:#CC3300;width:100px;height:100px;"></div>', unsafe_allow_html=True)  # Blended red
        st.markdown('<div style="background-color:#CC6600;width:100px;height:100px;"></div>', unsafe_allow_html=True)  # Blended orange
        st.markdown('<div style="background-color:#3333CC;width:100px;height:100px;"></div>', unsafe_allow_html=True)  # Blended blue
        st.markdown('<div style="background-color:#33CCCC;width:100px;height:100px;"></div>', unsafe_allow_html=True)  # Blended cyan
        st.markdown('<div style="background-color:#999999;width:100px;height:100px;"></div>', unsafe_allow_html=True)  # Blended gray
# Page footer (top level, outside both tabs).
st.markdown("### Contact: cpt66778811@gmail.com | Patent Pending")