#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ffm_demo.py - FFM (Fully Forward Mode) Neural Network Demo
全前向模式神经网络演示程序

基于清华大学"太极-II"光芯片研究
Author: AI Research Lab
Date: 2024
License: MIT

使用方法:
    python ffm_demo.py [--experiment all|xor|spiral|optical|comparison]
    
依赖:
    pip install numpy matplotlib
"""

import numpy as np
import matplotlib.pyplot as plt
import time
import argparse
from typing import List, Tuple, Optional, Dict
import warnings
warnings.filterwarnings('ignore')

# ======================== 配置参数 ========================
class Config:
    """Global configuration: RNG seed, plotting defaults, color palette."""
    # Seed applied to numpy's global RNG at import time (see module top level).
    RANDOM_SEED = 42
    # Dots-per-inch for all matplotlib figures.
    FIGURE_DPI = 100
    # Named hex colors used by the visualization helpers.
    COLORS = {
        'primary': '#2E86AB',
        'secondary': '#A23B72',
        'success': '#4CAF50',
        'warning': '#FF9800',
        'danger': '#F44336'
    }

# 设置随机种子
np.random.seed(Config.RANDOM_SEED)

# 设置matplotlib参数
plt.rcParams['figure.dpi'] = Config.FIGURE_DPI
plt.rcParams['figure.figsize'] = (10, 6)
plt.rcParams['axes.grid'] = True
plt.rcParams['grid.alpha'] = 0.3

# ======================== FFM神经网络核心实现 ========================

class FFMNeuralNetwork:
    """
    Fully Forward Mode (FFM) neural network.

    Core idea: training uses only forward passes. The error signal is
    propagated "forward" through the (ideally symmetric) weights instead of
    via a separate backward pass, mimicking how an optical system with
    reciprocal light paths can compute gradients physically.
    """

    def __init__(self,
                 layers: List[int],
                 activation: str = 'tanh',
                 symmetric: bool = True,
                 use_regularization: bool = True):
        """
        Initialize the FFM network.

        Args:
            layers: layer sizes, e.g. [2, 4, 1].
            activation: hidden activation ('tanh', 'sigmoid', 'relu');
                anything else falls back to linear.
            symmetric: enforce W == W.T for square weight matrices
                (the key property FFM exploits).
            use_regularization: apply an L2 weight penalty during FFM training.
        """
        self.layers = layers
        self.n_layers = len(layers)
        self.activation_type = activation
        self.symmetric = symmetric
        self.use_regularization = use_regularization

        # Create self.weights / self.biases.
        self._initialize_weights()

        # Training history, appended to by train_ffm (one entry per call).
        # 'accuracy' is kept for interface compatibility but is not filled
        # by the training methods.
        self.history: Dict[str, list] = {
            'loss': [],
            'accuracy': [],
            'gradients': []
        }

    def _initialize_weights(self) -> None:
        """Create weights/biases with He (relu) or Xavier (otherwise) scaling."""
        self.weights = []
        self.biases = []

        for i in range(self.n_layers - 1):
            n_in = self.layers[i]
            n_out = self.layers[i + 1]

            # He initialization for ReLU, Xavier/Glorot otherwise.
            if self.activation_type == 'relu':
                scale = np.sqrt(2.0 / n_in)
            else:
                scale = np.sqrt(2.0 / (n_in + n_out))

            W = np.random.randn(n_out, n_in) * scale

            # Symmetrize square matrices so the data pass and the
            # "error-forward" pass see the same operator (the FFM requirement).
            if self.symmetric and n_out == n_in:
                W = (W + W.T) / 2

            b = np.zeros((n_out, 1))

            self.weights.append(W)
            self.biases.append(b)

    def activate(self, x: np.ndarray, derivative: bool = False) -> np.ndarray:
        """Elementwise activation, or its derivative when derivative=True."""
        # Clip inputs for numerical stability of exp/tanh.
        x = np.clip(x, -10, 10)

        if self.activation_type == 'tanh':
            if derivative:
                tanh_x = np.tanh(x)
                return 1 - tanh_x ** 2
            return np.tanh(x)

        elif self.activation_type == 'sigmoid':
            sig = 1 / (1 + np.exp(-x))
            if derivative:
                return sig * (1 - sig)
            return sig

        elif self.activation_type == 'relu':
            if derivative:
                return (x > 0).astype(float)
            return np.maximum(0, x)

        else:  # fall back to linear for unknown activation names
            return x if not derivative else np.ones_like(x)

    def forward(self, x: np.ndarray) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        """
        Forward pass for a single sample.

        Args:
            x: input sample; 1-D inputs are reshaped to a column vector.

        Returns:
            activations: per-layer activations (index 0 is the input).
            z_values: per-layer pre-activation sums.
        """
        if x.ndim == 1:
            x = x.reshape(-1, 1)

        activations = [x]
        z_values = []

        for i in range(len(self.weights)):
            # Affine transform, clipped to avoid overflow downstream.
            z = np.dot(self.weights[i], activations[-1]) + self.biases[i]
            z = np.clip(z, -100, 100)
            z_values.append(z)

            if i < len(self.weights) - 1:
                a = self.activate(z)
            else:
                # Output layer is always sigmoid (binary-classification head),
                # regardless of the hidden activation choice.
                a = 1 / (1 + np.exp(-np.clip(z, -10, 10)))

            activations.append(a)

        return activations, z_values

    def train_ffm(self,
                  x: np.ndarray,
                  y_target: np.ndarray,
                  learning_rate: float = 0.01,
                  reg_lambda: float = 0.001) -> float:
        """
        One FFM training step on a single sample.

        Two forward passes replace backpropagation:
          1. the data pass (forward());
          2. an error pass through the transposed weights — in the physical
             system this is another *forward* propagation, enabled by the
             symmetry/reciprocity of the optical medium.

        NOTE(review): the output-layer delta omits the sigmoid derivative,
        which matches a cross-entropy-style gradient; kept as-is to preserve
        the demo's behavior.

        Returns:
            Total loss (MSE + optional L2 penalty) for this sample.
        """
        # Pass 1: data forward propagation.
        activations, z_values = self.forward(x)
        y_pred = activations[-1]

        if y_target.ndim == 1:
            y_target = y_target.reshape(-1, 1)

        # Loss. Note error = target - pred, so the updates below *add* the
        # gradients (descent with the sign folded into `error`).
        error = y_target - y_pred
        mse_loss = 0.5 * np.sum(error ** 2)

        # Optional L2 penalty on all weight matrices.
        reg_loss = 0
        if self.use_regularization:
            for W in self.weights:
                reg_loss += reg_lambda * np.sum(W ** 2)

        total_loss = mse_loss + reg_loss

        # Pass 2: "forward" propagation of the error signal.
        errors = [error]
        # Loop covers hidden layers only (lower bound 1 stops before the
        # input layer), so the redundant `if i > 0` guard was removed.
        for i in range(len(self.weights) - 1, 0, -1):
            # W.T here; physically realized as a forward pass through the
            # symmetric system.
            error_prop = np.dot(self.weights[i].T, errors[0])
            deriv = self.activate(z_values[i - 1], derivative=True)
            deriv = np.clip(deriv, 1e-7, 1.0)  # guard against vanishing gradients
            errors.insert(0, error_prop * deriv)

        # Gradient computation and in-place parameter update.
        weight_gradients = []
        bias_gradients = []

        for i in range(len(self.weights)):
            # FFM gradient: outer product of layer error and layer input.
            w_grad = np.dot(errors[i], activations[i].T)
            b_grad = errors[i]

            if self.use_regularization:
                # L2 decay; the minus sign matches the += update below.
                w_grad -= reg_lambda * self.weights[i]

            weight_gradients.append(w_grad)
            bias_gradients.append(b_grad)

            # Gradient clipping (history records the unclipped norms).
            w_grad = np.clip(w_grad, -5, 5)
            b_grad = np.clip(b_grad, -5, 5)

            self.weights[i] += learning_rate * w_grad
            self.biases[i] += learning_rate * b_grad

            # Re-symmetrize square hidden weights to keep the FFM property.
            if self.symmetric and i < len(self.weights) - 1:
                if self.weights[i].shape[0] == self.weights[i].shape[1]:
                    self.weights[i] = (self.weights[i] + self.weights[i].T) / 2

            # Keep weights bounded.
            self.weights[i] = np.clip(self.weights[i], -10, 10)

        # Record history for plotting/inspection.
        self.history['loss'].append(total_loss)
        self.history['gradients'].append([np.linalg.norm(g) for g in weight_gradients])

        return total_loss

    def train_backprop(self,
                       x: np.ndarray,
                       y_target: np.ndarray,
                       learning_rate: float = 0.01) -> float:
        """
        One classic backpropagation step on a single sample (baseline).

        Deliberately plain SGD — no clipping, regularization, or
        symmetrization — so it serves as the comparison point for FFM.
        """
        activations, z_values = self.forward(x)
        y_pred = activations[-1]

        if y_target.ndim == 1:
            y_target = y_target.reshape(-1, 1)

        loss = 0.5 * np.sum((y_pred - y_target) ** 2)

        # Backward pass: delta = pred - target, so updates below *subtract*.
        delta = y_pred - y_target
        deltas = [delta]

        for i in range(len(self.weights) - 1, 0, -1):
            delta = np.dot(self.weights[i].T, deltas[0])
            # Lower bound is 1, so z_values[i-1] always exists; the old
            # `if i > 0` guard was redundant and has been removed.
            delta = delta * self.activate(z_values[i - 1], derivative=True)
            deltas.insert(0, delta)

        # Plain SGD update.
        for i in range(len(self.weights)):
            w_grad = np.dot(deltas[i], activations[i].T)
            b_grad = deltas[i]

            self.weights[i] -= learning_rate * w_grad
            self.biases[i] -= learning_rate * b_grad

        return loss

    def predict(self, x: np.ndarray) -> np.ndarray:
        """Run a forward pass and return the flattened output activations."""
        activations, _ = self.forward(x)
        return activations[-1].flatten()

    def predict_batch(self, X: np.ndarray) -> np.ndarray:
        """Predict each row of X; scalar outputs are unwrapped to floats."""
        predictions = []
        for x in X:
            pred = self.predict(x)
            predictions.append(pred[0] if len(pred) == 1 else pred)
        return np.array(predictions)

    def evaluate(self, X: np.ndarray, Y: np.ndarray) -> Dict:
        """
        Evaluate the model on (X, Y).

        Returns a dict with binary accuracy (0.5 threshold), MSE, and the
        raw predictions.

        Fix: the previous `predictions.shape[0] == len(Y)` guard was always
        true for predict_batch output, leaving an unreachable `accuracy = 0`
        branch; accuracy is now computed unconditionally.
        """
        predictions = self.predict_batch(X)

        # Binary accuracy at the 0.5 threshold.
        accuracy = np.mean((predictions > 0.5) == (Y > 0.5))

        # Mean squared error of the raw outputs.
        mse = np.mean((predictions - Y) ** 2)

        return {
            'accuracy': accuracy,
            'mse': mse,
            'predictions': predictions
        }


# ======================== 光学系统模拟 ========================

class OpticalSystemSimulator:
    """
    Simulator for an optical FFM system.

    Models a liquid-crystal spatial light modulator (SLM) plus free-space
    light-field propagation via the angular-spectrum method.
    """

    def __init__(self,
                 resolution: Tuple[int, int] = (64, 64),
                 wavelength: float = 632.8e-9,
                 pixel_pitch: float = 8e-6):
        """
        Set up the optical system.

        Args:
            resolution: SLM pixel grid.
            wavelength: laser wavelength in meters (default: HeNe 632.8 nm).
            pixel_pitch: pixel spacing in meters.
        """
        self.resolution = resolution
        self.wavelength = wavelength
        self.pixel_pitch = pixel_pitch
        self.k = 2 * np.pi / wavelength  # wavenumber

        # Physical coordinate grids centered on the optical axis.
        self.x = np.linspace(-resolution[0] // 2, resolution[0] // 2, resolution[0]) * pixel_pitch
        self.y = np.linspace(-resolution[1] // 2, resolution[1] // 2, resolution[1]) * pixel_pitch
        self.X, self.Y = np.meshgrid(self.x, self.y)

    def create_gaussian_beam(self, waist: float = 1e-3) -> np.ndarray:
        """Return the amplitude profile of a Gaussian beam with the given waist."""
        radial_sq = self.X ** 2 + self.Y ** 2
        return np.exp(-radial_sq / (waist ** 2))

    def phase_modulation(self, field: np.ndarray, phase: np.ndarray) -> np.ndarray:
        """Apply an SLM-style phase mask to a complex field."""
        return np.exp(1j * phase) * field

    def propagate(self, field: np.ndarray, distance: float) -> np.ndarray:
        """
        Propagate a field through free space using the angular-spectrum method.
        """
        # Spatial-frequency grids, centered to match the shifted spectrum.
        fx = np.fft.fftshift(np.fft.fftfreq(self.resolution[0], self.pixel_pitch))
        fy = np.fft.fftshift(np.fft.fftfreq(self.resolution[1], self.pixel_pitch))
        FX, FY = np.meshgrid(fx, fy)

        # Transfer function; the +0j keeps kz complex so evanescent
        # components decay instead of producing NaNs.
        kz = np.sqrt(self.k ** 2 - (2 * np.pi * FX) ** 2 - (2 * np.pi * FY) ** 2 + 0j)
        transfer = np.exp(1j * kz * distance)

        # To the frequency domain, apply the kernel, and back.
        spectrum = np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(field)))
        return np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(spectrum * transfer)))

    def interference_pattern(self, field1: np.ndarray, field2: np.ndarray) -> np.ndarray:
        """Intensity of the superposition of two coherent fields."""
        combined = field1 + field2
        return np.abs(combined) ** 2

    def simulate_ffm_propagation(self, weights: List[np.ndarray]) -> Dict:
        """
        Simulate the FFM data pass and error pass optically.

        Each weight matrix is (crudely) mapped to a uniform phase mask; the
        field travels 10 cm after each mask. Returns the input field, the
        output intensity, and an interference pattern carrying the gradient
        information.
        """
        source = self.create_gaussian_beam()

        # Data pass: modulate and propagate once per layer.
        field = source
        for W in weights:
            mask = np.full(self.resolution, np.angle(np.sum(W)))
            field = self.phase_modulation(field, mask)
            field = self.propagate(field, 0.1)  # 10 cm hop

        output_intensity = np.abs(field) ** 2

        # Error pass in reverse layer order (enabled by system symmetry).
        error_field = 0.1 * field  # simplified stand-in for the true error
        for W in reversed(weights):
            mask = np.full(self.resolution, np.angle(np.sum(W.T)))
            error_field = self.phase_modulation(error_field, mask)
            error_field = self.propagate(error_field, 0.1)

        # Interference readout yields the gradient signal.
        return {
            'input': source,
            'output': output_intensity,
            'gradient': self.interference_pattern(field, error_field),
        }


# ======================== 数据集生成 ========================

class DataGenerator:
    """Factory of small synthetic datasets used by the experiments."""

    @staticmethod
    def generate_xor(n_samples: int = 4) -> Tuple[np.ndarray, np.ndarray]:
        """Return the four XOR patterns and their labels.

        Note: `n_samples` is accepted for API symmetry with the other
        generators but ignored — XOR has exactly four input patterns.
        """
        inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        targets = np.array([0, 1, 1, 0])
        return inputs, targets

    @staticmethod
    def generate_spiral(n_points: int = 100, n_classes: int = 2, noise: float = 0.1) -> Tuple[np.ndarray, np.ndarray]:
        """Generate a noisy multi-arm spiral classification set."""
        samples = []
        labels = []
        per_class = n_points // n_classes

        for label in range(n_classes):
            # Each arm sweeps a full turn, angularly offset by label * pi.
            angles = np.linspace(label * np.pi,
                                 (label + 2) * np.pi,
                                 per_class)
            radii = np.linspace(0.1, 1, per_class)

            # Draw x-noise then y-noise (same RNG call order as before).
            xs = radii * np.cos(angles) + np.random.randn(per_class) * noise
            ys = radii * np.sin(angles) + np.random.randn(per_class) * noise

            samples.extend(zip(xs, ys))
            labels.extend([label] * per_class)

        return np.array(samples), np.array(labels)

    @staticmethod
    def generate_circle(n_points: int = 100) -> Tuple[np.ndarray, np.ndarray]:
        """Gaussian cloud labeled by membership in the unit circle."""
        points = np.random.randn(n_points, 2)
        inside = (np.linalg.norm(points, axis=1) < 1.0).astype(int)
        return points, inside

    @staticmethod
    def generate_mnist_simple(n_samples: int = 100) -> Tuple[np.ndarray, np.ndarray]:
        """Toy MNIST-style data: 4 random 'pixels' (2x2 image) per sample.

        Label rule: class 1 iff the top-left + bottom-right pixel sum > 0.
        """
        pixels = np.random.randn(n_samples, 4)
        targets = ((pixels[:, 0] + pixels[:, 3]) > 0).astype(int)
        return pixels, targets


# ======================== 可视化工具 ========================

class Visualizer:
    """Static plotting helpers: loss curves, decision boundaries, optical
    simulation maps, and method comparison charts.

    All methods render with matplotlib and end with plt.show(), which blocks
    in interactive backends.
    """
    
    @staticmethod
    def plot_loss_curves(losses_dict: Dict, title: str = "Training Loss"):
        """Plot one loss curve per entry of losses_dict (label -> loss list)."""
        plt.figure(figsize=(10, 6))
        
        for label, losses in losses_dict.items():
            plt.plot(losses, label=label, linewidth=2)
        
        plt.xlabel('Epoch', fontsize=12)
        plt.ylabel('Loss', fontsize=12)
        plt.title(title, fontsize=14, fontweight='bold')
        plt.legend(loc='best')
        # Log scale makes convergence rates comparable across methods.
        plt.yscale('log')
        plt.grid(True, alpha=0.3)
        plt.tight_layout()
        plt.show()
    
    @staticmethod
    def plot_decision_boundary(network: FFMNeuralNetwork, 
                              X: np.ndarray, 
                              Y: np.ndarray,
                              title: str = "Decision Boundary"):
        """Contour-plot a trained network's output over a 2D feature grid.

        Only supports 2-feature data; prints a message and returns otherwise.
        """
        if X.shape[1] != 2:
            print("决策边界可视化仅支持2D数据")
            return
        
        plt.figure(figsize=(10, 8))
        
        # Evaluation grid covering the data with a 0.5 margin on each side.
        x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
        y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
        xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
                           np.linspace(y_min, y_max, 200))
        
        # Query the network once per grid point (200x200 forward passes).
        Z = np.zeros_like(xx)
        for i in range(xx.shape[0]):
            for j in range(xx.shape[1]):
                point = np.array([xx[i, j], yy[i, j]])
                Z[i, j] = network.predict(point)[0]
        
        # Filled contours of the raw network output.
        plt.contourf(xx, yy, Z, levels=20, cmap='RdBu', alpha=0.6)
        plt.colorbar(label='Network Output')
        
        # Overlay the training points, colored by label.
        scatter = plt.scatter(X[:, 0], X[:, 1], c=Y, cmap='RdBu',
                            edgecolor='white', s=100, linewidth=2)
        
        plt.xlabel('Feature 1', fontsize=12)
        plt.ylabel('Feature 2', fontsize=12)
        plt.title(title, fontsize=14, fontweight='bold')
        plt.grid(True, alpha=0.3)
        plt.tight_layout()
        plt.show()
    
    @staticmethod
    def plot_optical_simulation(sim_results: Dict):
        """Show the input field, output intensity, and gradient/interference maps
        produced by OpticalSystemSimulator.simulate_ffm_propagation."""
        fig, axes = plt.subplots(1, 3, figsize=(15, 5))
        
        # Input field amplitude.
        im1 = axes[0].imshow(np.abs(sim_results['input']), cmap='hot')
        axes[0].set_title('输入光场', fontsize=12)
        axes[0].axis('off')
        plt.colorbar(im1, ax=axes[0], fraction=0.046)
        
        # Output intensity after the data pass.
        im2 = axes[1].imshow(sim_results['output'], cmap='hot')
        axes[1].set_title('输出强度', fontsize=12)
        axes[1].axis('off')
        plt.colorbar(im2, ax=axes[1], fraction=0.046)
        
        # Interference pattern carrying the gradient information.
        im3 = axes[2].imshow(sim_results['gradient'], cmap='seismic')
        axes[2].set_title('梯度信息（干涉图）', fontsize=12)
        axes[2].axis('off')
        plt.colorbar(im3, ax=axes[2], fraction=0.046)
        
        plt.suptitle('光学FFM系统模拟', fontsize=14, fontweight='bold')
        plt.tight_layout()
        plt.show()
    
    @staticmethod
    def plot_performance_comparison(results: Dict):
        """Bar-chart accuracy, training time, and relative energy per method.

        Expects results like {'FFM': {'accuracy': ..., 'time': ...}, ...};
        'energy' is optional (defaults to 1.0).
        """
        fig, axes = plt.subplots(1, 3, figsize=(15, 5))
        
        # Accuracy bars (one color per method, two methods expected).
        methods = list(results.keys())
        accuracies = [results[m]['accuracy'] for m in methods]
        colors = [Config.COLORS['success'], Config.COLORS['primary']]
        
        axes[0].bar(methods, accuracies, color=colors)
        axes[0].set_ylabel('Accuracy (%)', fontsize=12)
        axes[0].set_title('准确率对比', fontsize=12, fontweight='bold')
        axes[0].set_ylim([0, 105])
        
        # Numeric labels just above each accuracy bar.
        for i, (method, acc) in enumerate(zip(methods, accuracies)):
            axes[0].text(i, acc + 2, f'{acc:.1f}%', ha='center')
        
        # Training-time bars.
        times = [results[m]['time'] for m in methods]
        axes[1].bar(methods, times, color=colors)
        axes[1].set_ylabel('Time (seconds)', fontsize=12)
        axes[1].set_title('训练时间对比', fontsize=12, fontweight='bold')
        
        # Relative energy (simulated placeholder values, log scale).
        energy = [results[m].get('energy', 1.0) for m in methods]
        axes[2].bar(methods, energy, color=colors)
        axes[2].set_ylabel('Energy (Relative)', fontsize=12)
        axes[2].set_title('能效对比（相对值）', fontsize=12, fontweight='bold')
        axes[2].set_yscale('log')
        
        plt.suptitle('FFM vs 传统反向传播', fontsize=14, fontweight='bold')
        plt.tight_layout()
        plt.show()


# ======================== 实验函数 ========================

def experiment_xor():
    """Experiment 1: train FFM and backprop networks on XOR and compare.

    Returns:
        Dict of per-method accuracy/time/energy consumed by
        Visualizer.plot_performance_comparison. The 'energy' values are
        illustrative placeholders, not measurements.
    """
    print("\n" + "="*60)
    print("实验1：XOR问题")
    print("="*60)
    
    # The four XOR patterns.
    X, Y = DataGenerator.generate_xor()
    
    # Two identical architectures: one trained with FFM, one with backprop.
    print("\n创建FFM网络...")
    ffm_net = FFMNeuralNetwork([2, 4, 1], activation='tanh', symmetric=True)
    bp_net = FFMNeuralNetwork([2, 4, 1], activation='tanh', symmetric=False)
    
    # Copy the initial parameters so both methods start from the same point.
    bp_net.weights = [w.copy() for w in ffm_net.weights]
    bp_net.biases = [b.copy() for b in ffm_net.biases]
    
    # Shared training hyperparameters.
    epochs = 500
    learning_rate = 0.3
    
    # --- FFM training ---
    print(f"\n训练FFM网络 ({epochs} epochs)...")
    ffm_losses = []
    start_time = time.time()
    
    for epoch in range(epochs):
        epoch_loss = 0
        for x, y in zip(X, Y):
            loss = ffm_net.train_ffm(x, y, learning_rate)
            epoch_loss += loss
        ffm_losses.append(epoch_loss / len(X))
        
        # Progress report every 100 epochs.
        if (epoch + 1) % 100 == 0:
            acc = ffm_net.evaluate(X, Y)['accuracy'] * 100
            print(f"  Epoch {epoch+1}: Loss={ffm_losses[-1]:.4f}, Accuracy={acc:.1f}%")
    
    ffm_time = time.time() - start_time
    
    # --- Backpropagation training (baseline) ---
    print(f"\n训练反向传播网络 ({epochs} epochs)...")
    bp_losses = []
    start_time = time.time()
    
    for epoch in range(epochs):
        epoch_loss = 0
        for x, y in zip(X, Y):
            loss = bp_net.train_backprop(x, y, learning_rate)
            epoch_loss += loss
        bp_losses.append(epoch_loss / len(X))
        
        if (epoch + 1) % 100 == 0:
            acc = bp_net.evaluate(X, Y)['accuracy'] * 100
            print(f"  Epoch {epoch+1}: Loss={bp_losses[-1]:.4f}, Accuracy={acc:.1f}%")
    
    bp_time = time.time() - start_time
    
    # Final side-by-side metrics.
    print("\n" + "-"*40)
    print("最终结果对比：")
    print("-"*40)
    
    ffm_eval = ffm_net.evaluate(X, Y)
    bp_eval = bp_net.evaluate(X, Y)
    
    print(f"FFM网络:")
    print(f"  最终损失: {ffm_losses[-1]:.6f}")
    print(f"  准确率: {ffm_eval['accuracy']*100:.1f}%")
    print(f"  训练时间: {ffm_time:.3f}秒")
    
    print(f"\n反向传播网络:")
    print(f"  最终损失: {bp_losses[-1]:.6f}")
    print(f"  准确率: {bp_eval['accuracy']*100:.1f}%")
    print(f"  训练时间: {bp_time:.3f}秒")
    
    # Per-pattern prediction table.
    print("\n" + "-"*40)
    print("预测结果：")
    print("-"*40)
    print("输入\t\t目标\tFFM预测\t反向传播预测")
    for x, y in zip(X, Y):
        ffm_pred = ffm_net.predict(x)[0]
        bp_pred = bp_net.predict(x)[0]
        print(f"{x}\t{y}\t{ffm_pred:.3f}\t{bp_pred:.3f}")
    
    # Loss curves and the FFM decision boundary.
    Visualizer.plot_loss_curves(
        {'FFM': ffm_losses, 'Backprop': bp_losses},
        title='XOR Problem: Loss Comparison'
    )
    
    Visualizer.plot_decision_boundary(
        ffm_net, X, Y, 
        title='XOR Problem: FFM Decision Boundary'
    )
    
    return {
        'FFM': {'accuracy': ffm_eval['accuracy']*100, 'time': ffm_time, 'energy': 0.1},
        'Backprop': {'accuracy': bp_eval['accuracy']*100, 'time': bp_time, 'energy': 1.0}
    }


def experiment_spiral():
    """Experiment 2: train a deeper FFM network on the spiral dataset.

    Returns:
        The trained FFMNeuralNetwork.
    """
    print("\n" + "="*60)
    print("实验2：螺旋数据集")
    print("="*60)
    
    # Noisy two-arm spiral, 100 points total.
    X, Y = DataGenerator.generate_spiral(n_points=100, noise=0.1)
    
    # Deeper symmetric network for the harder, non-linear boundary.
    print("\n创建深层FFM网络...")
    network = FFMNeuralNetwork([2, 8, 6, 4, 1], activation='tanh', symmetric=True)
    
    # Training hyperparameters.
    epochs = 200
    learning_rate = 0.05
    losses = []
    
    print(f"\n训练网络 ({epochs} epochs)...")
    for epoch in range(epochs):
        epoch_loss = 0
        for x, y in zip(X, Y):
            loss = network.train_ffm(x, y, learning_rate)
            epoch_loss += loss
        losses.append(epoch_loss / len(X))
        
        # Progress report every 40 epochs.
        if (epoch + 1) % 40 == 0:
            acc = network.evaluate(X, Y)['accuracy'] * 100
            print(f"  Epoch {epoch+1}: Loss={losses[-1]:.4f}, Accuracy={acc:.1f}%")
    
    # Final evaluation (on the training set — no held-out split here).
    final_eval = network.evaluate(X, Y)
    print(f"\n最终准确率: {final_eval['accuracy']*100:.1f}%")
    
    # Visualize the learned boundary.
    Visualizer.plot_decision_boundary(
        network, X, Y,
        title='Spiral Dataset: FFM Decision Boundary'
    )
    
    return network


def experiment_optical_simulation():
    """Experiment 3: run the simulated optical FFM propagation.

    Returns:
        The OpticalSystemSimulator instance used.
    """
    print("\n" + "="*60)
    print("实验3：光学系统模拟")
    print("="*60)
    
    # 64x64 SLM grid with default wavelength/pitch.
    print("\n初始化光学系统...")
    simulator = OpticalSystemSimulator(resolution=(64, 64))
    
    # Small network whose weight matrices drive the phase masks.
    network = FFMNeuralNetwork([2, 3, 1], symmetric=True)
    
    # Data pass + error pass through the simulated optics.
    print("模拟光学FFM传播...")
    sim_results = simulator.simulate_ffm_propagation(network.weights)
    
    # Summary statistics of the simulated fields.
    print(f"输入光场能量: {np.sum(np.abs(sim_results['input'])**2):.2f}")
    print(f"输出光场能量: {np.sum(sim_results['output']):.2f}")
    print(f"梯度信息范数: {np.linalg.norm(sim_results['gradient']):.2f}")
    
    # Show the three field maps.
    Visualizer.plot_optical_simulation(sim_results)
    
    return simulator


def experiment_comparison():
    """Experiment 4: time FFM training across small/medium/large networks.

    Returns:
        List of per-configuration dicts (name, layers, parameter count,
        wall-clock training time, final loss).
    """
    print("\n" + "="*60)
    print("实验4：综合性能对比")
    print("="*60)
    
    # Architectures of increasing size.
    network_configs = [
        {'name': '小型', 'layers': [2, 4, 1]},
        {'name': '中型', 'layers': [4, 8, 4, 2]},
        {'name': '大型', 'layers': [8, 16, 8, 4, 1]}
    ]
    
    results = []
    
    for config in network_configs:
        print(f"\n测试{config['name']}网络 {config['layers']}...")
        
        # Random regression-style data shaped to match the layer sizes.
        input_dim = config['layers'][0]
        output_dim = config['layers'][-1]
        n_samples = 50
        
        X = np.random.randn(n_samples, input_dim)
        Y = np.random.rand(n_samples, output_dim)
        
        # FFM network under test.
        ffm_net = FFMNeuralNetwork(config['layers'], symmetric=True)
        
        # Time 100 epochs of per-sample FFM updates.
        start_time = time.time()
        for epoch in range(100):
            for x, y in zip(X, Y):
                ffm_net.train_ffm(x, y, learning_rate=0.01)
        
        train_time = time.time() - start_time
        
        # Collect size/time/loss statistics for the summary table.
        n_params = sum(w.size + b.size for w, b in zip(ffm_net.weights, ffm_net.biases))
        results.append({
            'name': config['name'],
            'layers': str(config['layers']),
            'parameters': n_params,
            'time': train_time,
            'final_loss': ffm_net.history['loss'][-1] if ffm_net.history['loss'] else 0
        })
    
    # Print the results table.
    print("\n" + "="*70)
    print("性能对比结果")
    print("="*70)
    print(f"{'网络':<10} {'结构':<20} {'参数数':<10} {'训练时间':<12} {'最终损失':<10}")
    print("-"*70)
    
    for r in results:
        print(f"{r['name']:<10} {r['layers']:<20} {r['parameters']:<10} "
              f"{r['time']:<12.3f} {r['final_loss']:<10.6f}")
    
    print("="*70)
    
    return results


def experiment_symmetry_analysis():
    """Experiment 5: compare FFM training with and without weight symmetry.

    Returns:
        (symmetric_net, asymmetric_net) after 300 epochs each on XOR.
    """
    print("\n" + "="*60)
    print("实验5：对称性影响分析")
    print("="*60)
    
    # XOR data again for a controlled comparison.
    X, Y = DataGenerator.generate_xor()
    
    # FFM with the symmetry constraint enforced.
    print("\n训练对称网络...")
    sym_net = FFMNeuralNetwork([2, 4, 1], symmetric=True)
    sym_losses = []
    
    for epoch in range(300):
        epoch_loss = 0
        for x, y in zip(X, Y):
            loss = sym_net.train_ffm(x, y, learning_rate=0.3)
            epoch_loss += loss
        sym_losses.append(epoch_loss / len(X))
    
    # Same training, but without the symmetry constraint.
    print("训练非对称网络...")
    asym_net = FFMNeuralNetwork([2, 4, 1], symmetric=False)
    asym_losses = []
    
    for epoch in range(300):
        epoch_loss = 0
        for x, y in zip(X, Y):
            loss = asym_net.train_ffm(x, y, learning_rate=0.3)
            epoch_loss += loss
        asym_losses.append(epoch_loss / len(X))
    
    # Report final losses and accuracies.
    print("\n" + "-"*40)
    print("对称性影响分析结果：")
    print("-"*40)
    print(f"对称网络最终损失: {sym_losses[-1]:.6f}")
    print(f"非对称网络最终损失: {asym_losses[-1]:.6f}")
    
    sym_acc = sym_net.evaluate(X, Y)['accuracy'] * 100
    asym_acc = asym_net.evaluate(X, Y)['accuracy'] * 100
    print(f"对称网络准确率: {sym_acc:.1f}%")
    print(f"非对称网络准确率: {asym_acc:.1f}%")
    
    # Overlay the two loss curves.
    Visualizer.plot_loss_curves(
        {'对称网络': sym_losses, '非对称网络': asym_losses},
        title='对称性对FFM训练的影响'
    )
    
    return sym_net, asym_net


# ======================== 主程序 ========================

def print_header():
    """Print the program banner (identical output to the original prints)."""
    banner = [
        "",
        "=" * 60,
        " " * 15 + "FFM神经网络演示程序",
        " " * 10 + "基于清华大学'太极-II'光芯片研究",
        "=" * 60,
        "",
        "作者: AI Research Lab",
        "版本: 1.0.0",
        "日期: 2024",
        "",
        "=" * 60,
    ]
    print("\n".join(banner))


def main():
    """Entry point: parse CLI arguments and run the selected experiment(s)."""
    # Command-line interface.
    parser = argparse.ArgumentParser(description='FFM神经网络演示程序')
    parser.add_argument('--experiment',
                       type=str,
                       default='all',
                       choices=['all', 'xor', 'spiral', 'optical', 'comparison', 'symmetry'],
                       help='选择要运行的实验')
    args = parser.parse_args()

    print_header()

    # Dispatch table for the single-experiment modes.
    single_runs = {
        'xor': experiment_xor,
        'spiral': experiment_spiral,
        'optical': experiment_optical_simulation,
        'comparison': experiment_comparison,
        'symmetry': experiment_symmetry_analysis,
    }

    try:
        if args.experiment == 'all':
            print("\n运行所有实验...\n")

            # Run every experiment in order; only the XOR results feed the
            # final summary chart.
            xor_results = experiment_xor()
            experiment_spiral()
            experiment_optical_simulation()
            experiment_comparison()
            experiment_symmetry_analysis()

            if xor_results:
                Visualizer.plot_performance_comparison(xor_results)
        else:
            single_runs[args.experiment]()

        print("\n" + "="*60)
        print("所有实验完成！")
        print("FFM方法成功验证：仅用前向传播即可训练神经网络")
        print("="*60)

    except KeyboardInterrupt:
        print("\n\n程序被用户中断")
    except Exception as e:
        # Report the failure with a traceback instead of crashing silently.
        print(f"\n运行出错: {e}")
        import traceback
        traceback.print_exc()


# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()