"""
Draw Model Architecture in English
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.patches import FancyBboxPatch, ConnectionPatch
import numpy as np

def draw_model_architecture(save_basename='model_architecture_english', show=True):
    """Draw the Transformer autoencoder architecture diagram.

    Lays out the model pipeline top-to-bottom on a single axis:
    input -> dimension transform -> positional encoding ->
    3 Transformer encoder layers (multi-scale attention + FFN) ->
    sparse representation head -> TopK sparsification -> reconstruction,
    plus a side panel with parameter counts. The figure is saved as both
    ``<save_basename>.png`` (300 dpi) and ``<save_basename>.pdf``.

    Parameters
    ----------
    save_basename : str, optional
        Base file name (without extension) for the saved figures.
        The default reproduces the original hard-coded output names.
    show : bool, optional
        When True (default) the figure is also displayed with
        ``plt.show()``; pass False for headless/batch rendering.
    """
    fig, ax = plt.subplots(1, 1, figsize=(16, 20))
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 20)
    ax.axis('off')

    # Fill colours for the pipeline stages.
    # NOTE: 'loss' and 'text' are kept for palette completeness but are
    # not referenced below.
    colors = {
        'input': '#E3F2FD',      # Light blue
        'transform': '#F3E5F5',   # Light purple
        'attention': '#E8F5E8',   # Light green
        'output': '#FFF3E0',      # Light orange
        'loss': '#FFEBEE',        # Light red
        'text': '#333333'         # Dark gray
    }

    # 1. Input layer box at the top of the diagram.
    y_pos = 19
    input_box = FancyBboxPatch((1, y_pos-0.4), 8, 0.8,
                               boxstyle="round,pad=0.1",
                               facecolor=colors['input'],
                               edgecolor='black', linewidth=2)
    ax.add_patch(input_box)
    ax.text(5, y_pos, 'Input: Complex Signal (4, 128, 20)\n8×8 Complex Matrix → Real/Imag Separation → 128D Features',
            ha='center', va='center', fontsize=12, weight='bold')

    # 2. Dimension transform: batch-last -> time-first layout.
    y_pos = 17.5
    transform_box = FancyBboxPatch((1.5, y_pos-0.3), 7, 0.6,
                                   boxstyle="round,pad=0.1",
                                   facecolor=colors['transform'],
                                   edgecolor='black')
    ax.add_patch(transform_box)
    ax.text(5, y_pos, 'Dimension Transform: (4,128,20) → (20,4,128)',
            ha='center', va='center', fontsize=11)

    # 3. Learnable positional encoding added to the features.
    y_pos = 16
    pos_box = FancyBboxPatch((1.5, y_pos-0.3), 7, 0.6,
                             boxstyle="round,pad=0.1",
                             facecolor=colors['transform'],
                             edgecolor='black')
    ax.add_patch(pos_box)
    ax.text(5, y_pos, 'Positional Encoding: Learnable (1,4,128) + Input',
            ha='center', va='center', fontsize=11)

    # 4. Three stacked Transformer encoder layers, each an outer frame
    #    containing a multi-scale attention row, its five kernel chips,
    #    and a feed-forward row. Layer centres: 14.5, 11.5, 8.5.
    for i in range(3):
        y_base = 14.5 - i * 3

        # Outer frame for this encoder layer.
        transformer_box = FancyBboxPatch((0.5, y_base-1.4), 9, 2.8,
                                         boxstyle="round,pad=0.1",
                                         facecolor=colors['attention'],
                                         edgecolor='black', linewidth=2)
        ax.add_patch(transformer_box)
        ax.text(5, y_base+1.2, f'Transformer Encoder Layer {i+1}',
                ha='center', va='center', fontsize=12, weight='bold')

        # Multi-scale attention sub-block.
        attention_box = FancyBboxPatch((1, y_base+0.2), 8, 0.8,
                                       boxstyle="round,pad=0.05",
                                       facecolor='white',
                                       edgecolor='blue')
        ax.add_patch(attention_box)
        ax.text(5, y_base+0.6, 'Multi-Scale Attention Mechanism',
                ha='center', va='center', fontsize=10, weight='bold')

        # One chip per convolution kernel size (plus the original path).
        conv_kernels = ['3', '5', '7', '9', 'Orig']
        for j, kernel in enumerate(conv_kernels):
            conv_box = FancyBboxPatch((1.5 + j*1.5, y_base-0.1), 1.2, 0.3,
                                      boxstyle="round,pad=0.02",
                                      facecolor='lightblue',
                                      edgecolor='blue')
            ax.add_patch(conv_box)
            ax.text(2.1 + j*1.5, y_base+0.05, f'K={kernel}',
                    ha='center', va='center', fontsize=8)

        # Position-wise feed-forward network sub-block.
        ffn_box = FancyBboxPatch((1, y_base-0.8), 8, 0.5,
                                 boxstyle="round,pad=0.05",
                                 facecolor='lightgreen',
                                 edgecolor='green')
        ax.add_patch(ffn_box)
        ax.text(5, y_base-0.55, 'Feed-Forward Network: 128 → 1024 → 128',
                ha='center', va='center', fontsize=10)

    # 5. Output head producing the sparse representation.
    y_pos = 5
    output_box = FancyBboxPatch((1, y_pos-0.8), 8, 1.6,
                                boxstyle="round,pad=0.1",
                                facecolor=colors['output'],
                                edgecolor='black', linewidth=2)
    ax.add_patch(output_box)
    ax.text(5, y_pos+0.4, 'Sparse Representation Network',
            ha='center', va='center', fontsize=12, weight='bold')
    ax.text(5, y_pos, 'Linear: 128 → 512 → 481',
            ha='center', va='center', fontsize=10)
    ax.text(5, y_pos-0.4, 'Output: (4, 481, 20)',
            ha='center', va='center', fontsize=10)

    # 6. TopK sparsification + L1 normalization stage.
    y_pos = 3
    sparse_box = FancyBboxPatch((1, y_pos-0.5), 8, 1,
                                boxstyle="round,pad=0.1",
                                facecolor='yellow',
                                edgecolor='orange', linewidth=2)
    ax.add_patch(sparse_box)
    ax.text(5, y_pos+0.2, 'TopK Sparsification',
            ha='center', va='center', fontsize=12, weight='bold')
    ax.text(5, y_pos-0.2, 'Keep Top 16 values per timestep + L1 Normalization',
            ha='center', va='center', fontsize=10)

    # 7. Final reconstruction stage.
    y_pos = 1.5
    recon_box = FancyBboxPatch((1, y_pos-0.5), 8, 1,
                               boxstyle="round,pad=0.1",
                               facecolor='lightcoral',
                               edgecolor='red', linewidth=2)
    ax.add_patch(recon_box)
    ax.text(5, y_pos+0.2, 'Signal Reconstruction',
            ha='center', va='center', fontsize=12, weight='bold')
    ax.text(5, y_pos-0.2, 'Y_recon = X @ D, Output: (4, 128, 20)',
            ha='center', va='center', fontsize=10)

    # Vertical arrows connecting consecutive stages; the y values mirror
    # the stage centres used above (three 14.5/11.5/8.5 encoder layers).
    arrow_props = dict(arrowstyle='->', lw=2, color='black')

    positions = [19, 17.5, 16, 14.5, 11.5, 8.5, 5, 3, 1.5]
    for i in range(len(positions)-1):
        ax.annotate('', xy=(5, positions[i+1]+0.4), xytext=(5, positions[i]-0.4),
                    arrowprops=arrow_props)

    # Decorative margin icons marking the major stages.
    # NOTE(review): emoji glyphs may not render with the default
    # matplotlib font on all platforms — verify on the target system.
    ax.text(0.2, 19, '📥', fontsize=20, ha='center')
    ax.text(0.2, 14, '🧠', fontsize=20, ha='center')
    ax.text(0.2, 5, '🎯', fontsize=20, ha='center')
    ax.text(0.2, 3, '✂️', fontsize=20, ha='center')
    ax.text(0.2, 1.5, '🔧', fontsize=20, ha='center')

    # Figure title.
    ax.text(5, 19.8, 'Transformer Autoencoder Model Architecture',
            ha='center', va='center', fontsize=16, weight='bold')

    # Side panel summarising the model's hyperparameters; it extends past
    # xlim=10, which bbox_inches='tight' keeps in the saved output.
    param_text = """Model Parameters:
• Total Params: 2,484,848
• Model Size: ~9.48 MB
• Input Dim: 128
• Time Steps: 20
• Batch Size: 4
• Endmembers: 481
• Sparsity: 16/481
• Attention Heads: 4
• Transformer Layers: 3"""

    ax.text(9.8, 10, param_text,
            ha='left', va='center', fontsize=9,
            bbox=dict(boxstyle="round,pad=0.3", facecolor='lightgray', alpha=0.8))

    plt.tight_layout()
    # Save under a single configurable basename (previously hard-coded in
    # three places, which made renaming the output error-prone).
    plt.savefig(f'{save_basename}.png', dpi=300, bbox_inches='tight')
    plt.savefig(f'{save_basename}.pdf', bbox_inches='tight')
    print(f"English model architecture saved as {save_basename}.png and .pdf")
    if show:
        plt.show()

def draw_data_flow_diagram():
    """Render the end-to-end data flow as a 4-row, 3-column grid of
    labelled boxes, connected left-to-right within each row and down the
    right-hand column between rows, then save it as a PNG."""
    fig, ax = plt.subplots(1, 1, figsize=(14, 10))
    ax.set_xlim(0, 14)
    ax.set_ylim(0, 10)
    ax.axis('off')

    # (label, (x, y) centre, fill colour) for each stage, row by row.
    steps = [
        ("Complex Signal\n8×8 Matrix", (2, 9), '#E3F2FD'),
        ("Real/Imag Split\n128D Features", (6, 9), '#F3E5F5'),
        ("Batch Data\n(4,128,20)", (10, 9), '#E8F5E8'),
        ("Transformer\nEncoding", (2, 7), '#E8F5E8'),
        ("Multi-Scale\nAttention", (6, 7), '#E8F5E8'),
        ("Sparse Rep\n(4,481,20)", (10, 7), '#FFF3E0'),
        ("TopK Sparse\n16 Non-zeros", (2, 5), '#FFEB3B'),
        ("L1 Normalize\nSum to 1", (6, 5), '#FFEB3B'),
        ("Matrix Recon\nY=X@D", (10, 5), '#FFCDD2'),
        ("Reconstructed\n(4,128,20)", (2, 3), '#FFCDD2'),
        ("Loss Compute\nRMSE+Sparse+Smooth", (6, 3), '#FFCDD2'),
        ("Backprop\nAdam Optimizer", (10, 3), '#C8E6C9'),
    ]

    # Draw one rounded box plus its label per stage.
    for label, (cx, cy), fill in steps:
        node = FancyBboxPatch((cx - 0.8, cy - 0.4), 1.6, 0.8,
                              boxstyle="round,pad=0.1",
                              facecolor=fill,
                              edgecolor='black')
        ax.add_patch(node)
        ax.text(cx, cy, label,
                ha='center', va='center', fontsize=9, weight='bold')

    blue_arrow = dict(arrowstyle='->', lw=2, color='blue')

    # Horizontal arrows: link the three stages within each row.
    for row_start in range(0, len(steps), 3):
        for offset in (0, 1):
            src = row_start + offset
            dst = src + 1
            if dst < len(steps):
                (sx, sy), (dx, dy) = steps[src][1], steps[dst][1]
                ax.annotate('', xy=(dx - 0.8, dy),
                            xytext=(sx + 0.8, sy),
                            arrowprops=blue_arrow)

    # Vertical arrows: link each row's last stage to the one below it.
    for src in (2, 5, 8):
        dst = src + 3
        if dst < len(steps):
            (sx, sy), (dx, dy) = steps[src][1], steps[dst][1]
            ax.annotate('', xy=(dx, dy + 0.4),
                        xytext=(sx, sy - 0.4),
                        arrowprops=blue_arrow)

    ax.text(7, 9.7, 'Data Flow Diagram', ha='center', va='center', fontsize=16, weight='bold')

    plt.tight_layout()
    plt.savefig('data_flow_diagram_english.png', dpi=300, bbox_inches='tight')
    print("English data flow diagram saved as data_flow_diagram_english.png")
    plt.show()

def draw_attention_mechanism():
    """Render a detail view of the multi-scale attention block: the input
    fans out to four Conv1D branches plus the original features, which are
    fused by learnable weights, passed through multi-head self-attention,
    and combined with a residual connection. Saved as a PNG."""
    fig, ax = plt.subplots(1, 1, figsize=(12, 8))
    ax.set_xlim(0, 12)
    ax.set_ylim(0, 8)
    ax.axis('off')

    # Input feature node at the top.
    src_node = FancyBboxPatch((1, 7), 2, 0.6,
                              boxstyle="round,pad=0.1",
                              facecolor='lightblue',
                              edgecolor='black')
    ax.add_patch(src_node)
    ax.text(2, 7.3, 'Input Features\n(20,4,128)', ha='center', va='center', fontsize=10, weight='bold')

    # One Conv1D branch per kernel size, each fed from the input node.
    for branch, ksize in enumerate([3, 5, 7, 9]):
        branch_x = 1 + branch * 2
        branch_node = FancyBboxPatch((branch_x, 5.5), 1.5, 0.8,
                                     boxstyle="round,pad=0.1",
                                     facecolor='lightgreen',
                                     edgecolor='green')
        ax.add_patch(branch_node)
        ax.text(branch_x + 0.75, 5.9, f'Conv1D\nkernel={ksize}',
                ha='center', va='center', fontsize=9)
        ax.annotate('', xy=(branch_x + 0.75, 5.5), xytext=(2, 7),
                    arrowprops=dict(arrowstyle='->', lw=1.5, color='blue'))

    # Pass-through branch carrying the unmodified features.
    passthru_node = FancyBboxPatch((9, 5.5), 1.5, 0.8,
                                   boxstyle="round,pad=0.1",
                                   facecolor='lightgreen',
                                   edgecolor='green')
    ax.add_patch(passthru_node)
    ax.text(9.75, 5.9, 'Original\nFeatures', ha='center', va='center', fontsize=9)
    ax.annotate('', xy=(9.75, 5.5), xytext=(2, 7),
                arrowprops=dict(arrowstyle='->', lw=1.5, color='blue'))

    # Fusion node combining all five branches via learnable weights.
    fusion_node = FancyBboxPatch((4, 4), 4, 0.8,
                                 boxstyle="round,pad=0.1",
                                 facecolor='yellow',
                                 edgecolor='orange')
    ax.add_patch(fusion_node)
    ax.text(6, 4.4, 'Learnable Weight Fusion\nSoftmax(w) × Features',
            ha='center', va='center', fontsize=10, weight='bold')

    # Arrows from every branch (four conv centres + pass-through) into fusion.
    branch_centres = [1.75 + k * 2 for k in range(4)] + [9.75]
    for bx in branch_centres:
        ax.annotate('', xy=(6, 4), xytext=(bx, 5.5),
                    arrowprops=dict(arrowstyle='->', lw=1.5, color='orange'))

    # Multi-head self-attention stage.
    mhsa_node = FancyBboxPatch((4, 2.5), 4, 0.8,
                               boxstyle="round,pad=0.1",
                               facecolor='lightcoral',
                               edgecolor='red')
    ax.add_patch(mhsa_node)
    ax.text(6, 2.9, 'Multi-Head Self-Attention\n4 heads, d_model=128',
            ha='center', va='center', fontsize=10, weight='bold')
    ax.annotate('', xy=(6, 2.5), xytext=(6, 4),
                arrowprops=dict(arrowstyle='->', lw=2, color='red'))

    # Output stage with residual connection.
    out_node = FancyBboxPatch((4, 1), 4, 0.8,
                              boxstyle="round,pad=0.1",
                              facecolor='lightgray',
                              edgecolor='black')
    ax.add_patch(out_node)
    ax.text(6, 1.4, 'Attention Output\n+ Residual Connection',
            ha='center', va='center', fontsize=10, weight='bold')
    ax.annotate('', xy=(6, 1), xytext=(6, 2.5),
                arrowprops=dict(arrowstyle='->', lw=2, color='black'))

    ax.text(6, 7.7, 'Multi-Scale Attention Mechanism Details',
            ha='center', va='center', fontsize=14, weight='bold')

    plt.tight_layout()
    plt.savefig('attention_mechanism_english.png', dpi=300, bbox_inches='tight')
    print("English attention mechanism saved as attention_mechanism_english.png")
    plt.show()

if __name__ == "__main__":
    # Render all three diagrams in order, announcing each one first.
    tasks = (
        ("Generating English model architecture diagram...", draw_model_architecture),
        ("\nGenerating English data flow diagram...", draw_data_flow_diagram),
        ("\nGenerating English attention mechanism diagram...", draw_attention_mechanism),
    )
    for message, renderer in tasks:
        print(message)
        renderer()
    print("\nAll English diagrams generated successfully!")