"""
绘制模型架构图
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.patches import FancyBboxPatch, ConnectionPatch
import numpy as np

# Configure matplotlib to render the CJK labels used below: prefer SimHei
# (a Chinese font) with DejaVu Sans as fallback, and use the ASCII
# hyphen-minus so minus signs still render under a CJK font.
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

def draw_model_architecture() -> None:
    """Draw the full Transformer-autoencoder architecture diagram.

    Lays out the input layer, dimension permutation, positional encoding,
    three Transformer encoder layers, the sparse output head, TopK
    sparsification, and the reconstruction stage on a 10x20 data-coordinate
    canvas. Saves ``model_architecture.png`` (300 dpi) and
    ``model_architecture.pdf`` in the current working directory, prints a
    confirmation message, and shows the figure. Returns None.
    """
    fig, ax = plt.subplots(1, 1, figsize=(16, 20))
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 20)
    ax.axis('off')
    
    # Fill colors per diagram section.
    colors = {
        'input': '#E3F2FD',      # light blue
        'transform': '#F3E5F5',   # light purple
        'attention': '#E8F5E8',   # light green
        'output': '#FFF3E0',      # light orange
        'loss': '#FFEBEE',        # light red  (NOTE(review): never used below)
        'text': '#333333'         # dark gray  (NOTE(review): never used below)
    }
    
    # 1. Input layer box.
    y_pos = 19
    input_box = FancyBboxPatch((1, y_pos-0.4), 8, 0.8, 
                               boxstyle="round,pad=0.1", 
                               facecolor=colors['input'], 
                               edgecolor='black', linewidth=2)
    ax.add_patch(input_box)
    ax.text(5, y_pos, '输入: 复数信号 (4, 128, 20)\n8×8复数矩阵 → 实部虚部分离 → 128维特征', 
            ha='center', va='center', fontsize=12, weight='bold')
    
    # 2. Dimension-permutation box: (batch, feat, time) -> (time, batch, feat).
    y_pos = 17.5
    transform_box = FancyBboxPatch((1.5, y_pos-0.3), 7, 0.6, 
                                   boxstyle="round,pad=0.1", 
                                   facecolor=colors['transform'], 
                                   edgecolor='black')
    ax.add_patch(transform_box)
    ax.text(5, y_pos, '维度转换: (4,128,20) → (20,4,128)', 
            ha='center', va='center', fontsize=11)
    
    # 3. Positional-encoding box (learnable encoding added to the input).
    y_pos = 16
    pos_box = FancyBboxPatch((1.5, y_pos-0.3), 7, 0.6, 
                             boxstyle="round,pad=0.1", 
                             facecolor=colors['transform'], 
                             edgecolor='black')
    ax.add_patch(pos_box)
    ax.text(5, y_pos, '位置编码: 可学习编码 (1,4,128) + 输入', 
            ha='center', va='center', fontsize=11)
    
    # 4. Three stacked Transformer encoder layers, vertically spaced 3 apart
    #    (centers at y = 14.5, 11.5, 8.5), each drawn as an outer frame with
    #    an attention sub-box, five kernel chips, and a feed-forward sub-box.
    for i in range(3):
        y_base = 14.5 - i * 3
        
        # Outer frame of this encoder layer (height 2.8, top at y_base+1.4).
        transformer_box = FancyBboxPatch((0.5, y_base-1.4), 9, 2.8, 
                                         boxstyle="round,pad=0.1", 
                                         facecolor=colors['attention'], 
                                         edgecolor='black', linewidth=2)
        ax.add_patch(transformer_box)
        ax.text(5, y_base+1.2, f'Transformer编码器层 {i+1}', 
                ha='center', va='center', fontsize=12, weight='bold')
        
        # Multi-scale attention sub-box.
        attention_box = FancyBboxPatch((1, y_base+0.2), 8, 0.8, 
                                       boxstyle="round,pad=0.05", 
                                       facecolor='white', 
                                       edgecolor='blue')
        ax.add_patch(attention_box)
        ax.text(5, y_base+0.6, '多尺度注意力机制', 
                ha='center', va='center', fontsize=10, weight='bold')
        
        # One small chip per conv-kernel branch (sizes 3/5/7/9 + identity).
        conv_kernels = ['3', '5', '7', '9', '原始']
        for j, kernel in enumerate(conv_kernels):
            conv_box = FancyBboxPatch((1.5 + j*1.5, y_base-0.1), 1.2, 0.3, 
                                      boxstyle="round,pad=0.02", 
                                      facecolor='lightblue', 
                                      edgecolor='blue')
            ax.add_patch(conv_box)
            ax.text(2.1 + j*1.5, y_base+0.05, f'K={kernel}', 
                    ha='center', va='center', fontsize=8)
        
        # Feed-forward sub-box (128 -> 1024 -> 128).
        ffn_box = FancyBboxPatch((1, y_base-0.8), 8, 0.5, 
                                 boxstyle="round,pad=0.05", 
                                 facecolor='lightgreen', 
                                 edgecolor='green')
        ax.add_patch(ffn_box)
        ax.text(5, y_base-0.55, '前馈网络: 128 → 1024 → 128', 
                ha='center', va='center', fontsize=10)
    
    # 5. Output head: sparse-representation generation network.
    y_pos = 5
    output_box = FancyBboxPatch((1, y_pos-0.8), 8, 1.6, 
                                boxstyle="round,pad=0.1", 
                                facecolor=colors['output'], 
                                edgecolor='black', linewidth=2)
    ax.add_patch(output_box)
    ax.text(5, y_pos+0.4, '稀疏表示生成网络', 
            ha='center', va='center', fontsize=12, weight='bold')
    ax.text(5, y_pos, 'Linear: 128 → 512 → 481', 
            ha='center', va='center', fontsize=10)
    ax.text(5, y_pos-0.4, '输出: (4, 481, 20)', 
            ha='center', va='center', fontsize=10)
    
    # 6. TopK sparsification stage.
    y_pos = 3
    sparse_box = FancyBboxPatch((1, y_pos-0.5), 8, 1, 
                                boxstyle="round,pad=0.1", 
                                facecolor='yellow', 
                                edgecolor='orange', linewidth=2)
    ax.add_patch(sparse_box)
    ax.text(5, y_pos+0.2, 'TopK稀疏化', 
            ha='center', va='center', fontsize=12, weight='bold')
    ax.text(5, y_pos-0.2, '每个时间步保留前16个最大值 + L1归一化', 
            ha='center', va='center', fontsize=10)
    
    # 7. Signal-reconstruction stage (Y_recon = X @ D).
    y_pos = 1.5
    recon_box = FancyBboxPatch((1, y_pos-0.5), 8, 1, 
                               boxstyle="round,pad=0.1", 
                               facecolor='lightcoral', 
                               edgecolor='red', linewidth=2)
    ax.add_patch(recon_box)
    ax.text(5, y_pos+0.2, '信号重构', 
            ha='center', va='center', fontsize=12, weight='bold')
    ax.text(5, y_pos-0.2, 'Y_recon = X @ D, 输出: (4, 128, 20)', 
            ha='center', va='center', fontsize=10)
    
    # Vertical arrows connecting consecutive stages.
    arrow_props = dict(arrowstyle='->', lw=2, color='black')
    
    # Stage center y-positions, top to bottom; each arrow runs from 0.4 below
    # one center to 0.4 above the next.
    # NOTE(review): the encoder frames are 1.4 tall above/below their centers,
    # so arrows touching them (e.g. tip at 14.9, frame top at 15.9) end inside
    # the frame outline — confirm this overlap is intended.
    positions = [19, 17.5, 16, 14.5, 11.5, 8.5, 5, 3, 1.5]
    for i in range(len(positions)-1):
        ax.annotate('', xy=(5, positions[i+1]+0.4), xytext=(5, positions[i]-0.4),
                    arrowprops=arrow_props)
    
    # Emoji markers down the left margin flagging each major stage.
    ax.text(0.2, 19, '📥', fontsize=20, ha='center')
    ax.text(0.2, 14, '🧠', fontsize=20, ha='center')
    ax.text(0.2, 5, '🎯', fontsize=20, ha='center')
    ax.text(0.2, 3, '✂️', fontsize=20, ha='center')
    ax.text(0.2, 1.5, '🔧', fontsize=20, ha='center')
    
    # Figure title.
    ax.text(5, 19.8, 'Transformer自编码器模型架构', 
            ha='center', va='center', fontsize=16, weight='bold')
    
    # Model-parameter summary panel.
    param_text = """
模型参数:
• 总参数: 2,484,848
• 模型大小: ~9.48 MB
• 输入维度: 128
• 时间步长: 20
• 批次大小: 4
• 端元数量: 481
• 稀疏度: 16/481
• 注意力头数: 4
• Transformer层数: 3
"""
    # NOTE(review): anchored at x=9.8 with ha='left' while xlim is (0, 10),
    # so the panel extends past the axis; bbox_inches='tight' below grows the
    # saved canvas to include it — confirm that is the intended layout.
    ax.text(9.8, 10, param_text, 
            ha='left', va='center', fontsize=9, 
            bbox=dict(boxstyle="round,pad=0.3", facecolor='lightgray', alpha=0.8))
    
    plt.tight_layout()
    plt.savefig('model_architecture.png', dpi=300, bbox_inches='tight')
    plt.savefig('model_architecture.pdf', bbox_inches='tight')
    print("模型架构图已保存为 model_architecture.png 和 model_architecture.pdf")
    plt.show()

def draw_data_flow_diagram() -> None:
    """Draw the end-to-end training data-flow diagram.

    Lays out twelve labelled step boxes in a 3-column x 4-row grid
    (columns at x = 2, 6, 10; rows at y = 9, 7, 5, 3), connects them
    with arrows, saves ``data_flow_diagram.png`` (300 dpi) in the current
    working directory, prints a confirmation, and shows the figure.
    Returns None.
    """
    fig, ax = plt.subplots(1, 1, figsize=(14, 10))
    ax.set_xlim(0, 14)
    ax.set_ylim(0, 10)
    ax.axis('off')
    
    # (label, (x, y) center, fill color) per step, row-major order.
    flow_steps = [
        ("复数信号\n8×8矩阵", (2, 9), '#E3F2FD'),
        ("实部虚部分离\n128维特征", (6, 9), '#F3E5F5'),
        ("批次数据\n(4,128,20)", (10, 9), '#E8F5E8'),
        ("Transformer\n编码", (2, 7), '#E8F5E8'),
        ("多尺度注意力\n特征提取", (6, 7), '#E8F5E8'),
        ("稀疏表示\n(4,481,20)", (10, 7), '#FFF3E0'),
        ("TopK稀疏化\n16个非零", (2, 5), '#FFEB3B'),
        ("L1归一化\n和为1", (6, 5), '#FFEB3B'),
        ("矩阵重构\nY=X@D", (10, 5), '#FFCDD2'),
        ("重构信号\n(4,128,20)", (2, 3), '#FFCDD2'),
        ("损失计算\nRMSE+稀疏+平滑", (6, 3), '#FFCDD2'),
        ("反向传播\nAdam优化", (10, 3), '#C8E6C9'),
    ]
    
    # Draw each step as a rounded 1.6 x 0.8 box with a centered bold label.
    # (fix: the loop index from enumerate() was unused, so it is dropped.)
    for text, pos, color in flow_steps:
        box = FancyBboxPatch((pos[0]-0.8, pos[1]-0.4), 1.6, 0.8, 
                             boxstyle="round,pad=0.1", 
                             facecolor=color, 
                             edgecolor='black')
        ax.add_patch(box)
        ax.text(pos[0], pos[1], text, 
                ha='center', va='center', fontsize=9, weight='bold')
    
    # Blue arrows mark the processing order.
    arrow_props = dict(arrowstyle='->', lw=2, color='blue')
    
    # Horizontal arrows: link the three boxes of each row left-to-right
    # (i is the index of the leftmost box of the row: 0, 3, 6, 9).
    for i in range(0, len(flow_steps), 3):
        if i+1 < len(flow_steps):
            ax.annotate('', xy=(flow_steps[i+1][1][0]-0.8, flow_steps[i+1][1][1]), 
                        xytext=(flow_steps[i][1][0]+0.8, flow_steps[i][1][1]),
                        arrowprops=arrow_props)
        if i+2 < len(flow_steps):
            ax.annotate('', xy=(flow_steps[i+2][1][0]-0.8, flow_steps[i+2][1][1]), 
                        xytext=(flow_steps[i+1][1][0]+0.8, flow_steps[i+1][1][1]),
                        arrowprops=arrow_props)
    
    # Vertical arrows: link the right-column boxes (indices 2, 5, 8) straight
    # down to the box three positions later (also right column).
    # NOTE(review): the labels read row-major (…2 -> 3…), but these arrows
    # connect 2 -> 5, 5 -> 8, 8 -> 11 in the right column — confirm intended.
    for i in [2, 5, 8]:
        if i+3 < len(flow_steps):
            ax.annotate('', xy=(flow_steps[i+3][1][0], flow_steps[i+3][1][1]+0.4), 
                        xytext=(flow_steps[i][1][0], flow_steps[i][1][1]-0.4),
                        arrowprops=arrow_props)
    
    ax.text(7, 9.7, '数据流程图', ha='center', va='center', fontsize=16, weight='bold')
    
    plt.tight_layout()
    plt.savefig('data_flow_diagram.png', dpi=300, bbox_inches='tight')
    print("数据流程图已保存为 data_flow_diagram.png")
    plt.show()

def draw_attention_mechanism() -> None:
    """Draw the detailed structure of the multi-scale attention block.

    Renders the input box, four Conv1D branches plus an identity branch,
    the learnable-weight fusion stage, multi-head self-attention, and the
    residual output stage. Saves ``attention_mechanism.png`` (300 dpi) in
    the working directory, prints a confirmation, and shows the figure.
    Returns None.
    """
    fig, ax = plt.subplots(1, 1, figsize=(12, 8))
    ax.set_xlim(0, 12)
    ax.set_ylim(0, 8)
    ax.axis('off')

    def rounded(xy, width, height, fill, edge):
        # Helper: add one rounded rectangle patch to the axes.
        ax.add_patch(FancyBboxPatch(xy, width, height,
                                    boxstyle="round,pad=0.1",
                                    facecolor=fill,
                                    edgecolor=edge))

    def arrow(tip, tail, **props):
        # Helper: draw a '->' arrow from tail to tip.
        ax.annotate('', xy=tip, xytext=tail,
                    arrowprops=dict(arrowstyle='->', **props))

    # Input feature box at the top-left.
    rounded((1, 7), 2, 0.6, 'lightblue', 'black')
    ax.text(2, 7.3, '输入特征\n(20,4,128)', ha='center', va='center', fontsize=10, weight='bold')

    # One Conv1D branch per kernel size, each fed from the input box.
    for idx, kernel in enumerate([3, 5, 7, 9]):
        left = 1 + idx * 2
        rounded((left, 5.5), 1.5, 0.8, 'lightgreen', 'green')
        ax.text(left + 0.75, 5.9, f'Conv1D\nkernel={kernel}',
                ha='center', va='center', fontsize=9)
        arrow((left + 0.75, 5.5), (2, 7), lw=1.5, color='blue')

    # Identity branch carrying the original features.
    rounded((9, 5.5), 1.5, 0.8, 'lightgreen', 'green')
    ax.text(9.75, 5.9, '原始特征', ha='center', va='center', fontsize=9)
    arrow((9.75, 5.5), (2, 7), lw=1.5, color='blue')

    # Fusion stage: softmax-normalized learnable weights over the branches.
    rounded((4, 4), 4, 0.8, 'yellow', 'orange')
    ax.text(6, 4.4, '可学习权重融合\nSoftmax(w) × 特征',
            ha='center', va='center', fontsize=10, weight='bold')

    # Arrows from all five branch centers into the fusion box.
    for branch_x in (1.75, 3.75, 5.75, 7.75, 9.75):
        arrow((6, 4), (branch_x, 5.5), lw=1.5, color='orange')

    # Multi-head self-attention stage.
    rounded((4, 2.5), 4, 0.8, 'lightcoral', 'red')
    ax.text(6, 2.9, '多头自注意力\n4个头, d_model=128',
            ha='center', va='center', fontsize=10, weight='bold')
    arrow((6, 2.5), (6, 4), lw=2, color='red')

    # Output stage with residual connection.
    rounded((4, 1), 4, 0.8, 'lightgray', 'black')
    ax.text(6, 1.4, '注意力输出\n+ 残差连接',
            ha='center', va='center', fontsize=10, weight='bold')
    arrow((6, 1), (6, 2.5), lw=2, color='black')

    # Figure title.
    ax.text(6, 7.7, '多尺度注意力机制详细结构',
            ha='center', va='center', fontsize=14, weight='bold')

    plt.tight_layout()
    plt.savefig('attention_mechanism.png', dpi=300, bbox_inches='tight')
    print("注意力机制图已保存为 attention_mechanism.png")
    plt.show()

if __name__ == "__main__":
    # Render every diagram in order, announcing progress on stdout.
    tasks = (
        ("正在生成模型架构图...", draw_model_architecture),
        ("\n正在生成数据流程图...", draw_data_flow_diagram),
        ("\n正在生成注意力机制图...", draw_attention_mechanism),
    )
    for message, render in tasks:
        print(message)
        render()
    print("\n所有图表已生成完成！") 