import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
import math

# ----------------------------
# 1. Model classes must be re-declared here, exactly matching the training code!
# ----------------------------
class CNNEncoder(nn.Module):
    """1-D convolutional front-end.

    Takes a (batch, time, channels) sequence, downsamples time by 4x via two
    strided convolutions, and projects channels to ``output_dim``.
    """

    def __init__(self, input_dim=8, hidden_dim=64, output_dim=128):
        super().__init__()
        # Each conv halves the temporal length: L -> floor((L + 2p - k)/2) + 1.
        self.conv1 = nn.Conv1d(input_dim, hidden_dim, kernel_size=7, stride=2, padding=3)
        self.conv2 = nn.Conv1d(hidden_dim, output_dim, kernel_size=5, stride=2, padding=2)
        self.relu = nn.ReLU()
        self.output_dim = output_dim

    def forward(self, x):
        # Conv1d wants (B, C, T); the caller passes (B, T, C).
        feats = x.transpose(1, 2)
        for conv in (self.conv1, self.conv2):
            feats = self.relu(conv(feats))
        # Back to (B, T', output_dim) for the transformer.
        return feats.transpose(1, 2)

class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding added to a (B, T, d_model) input.

    Sequences longer than ``max_len`` are not supported.
    """

    def __init__(self, d_model, max_len=500):
        super().__init__()
        positions = torch.arange(max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)  # even dims: sin
        table[:, 1::2] = torch.cos(positions * freqs)  # odd dims: cos
        # Buffer (not a Parameter): follows .to(device), kept in state_dict.
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x):
        seq_len = x.size(1)
        return x + self.pe[:, :seq_len, :]

class GaitCNNTransformer(nn.Module):
    """CNN front-end + Transformer encoder-decoder for gait torque prediction.

    NOTE(review): the decoder input is the learned start token repeated at
    every target position (no teacher forcing / autoregressive feedback is
    visible here). This must remain exactly as it was at training time so the
    saved checkpoint maps onto the same module names and behavior.
    """
    def __init__(self, input_dim=8, output_dim=1, d_model=128, nhead=8, num_layers=3, dropout=0.1):
        super().__init__()
        self.cnn_encoder = CNNEncoder(input_dim, hidden_dim=64, output_dim=d_model)
        # max_len=200 bounds the post-CNN sequence length (input length / 4).
        self.pos_encoder = PositionalEncoding(d_model, max_len=200)
        
        encoder_layer = nn.TransformerEncoderLayer(
            d_model, nhead, dim_feedforward=512, dropout=dropout, batch_first=True
        )
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers)
        
        decoder_layer = nn.TransformerDecoderLayer(
            d_model, nhead, dim_feedforward=512, dropout=dropout, batch_first=True
        )
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers)
        
        self.output_proj = nn.Linear(d_model, output_dim)
        # Learned start token, expanded to fill the whole decoder input.
        self.start_token = nn.Parameter(torch.randn(1, 1, d_model))

    def forward(self, src, tgt=None, src_key_padding_mask=None, tgt_key_padding_mask=None):
        """Encode src (B, T1, input_dim); return predictions (B, T2, output_dim).

        src_key_padding_mask: True marks padded timesteps of `src` (pre-CNN).
        tgt is only consulted for its length, and only in training mode.
        """
        memory = self.cnn_encoder(src)
        memory = self.pos_encoder(memory)

        # Recompute the exact valid lengths after the CNN (for the mask).
        # Each Conv1d maps length L to floor((L + 2*pad - kernel)/stride) + 1.
        if src_key_padding_mask is not None:
            src_valid_len = (~src_key_padding_mask).sum(dim=1)
            L1 = (src_valid_len + 2*3 - 7) // 2 + 1
            L1 = torch.clamp(L1, min=0)
            L2 = (L1 + 2*2 - 5) // 2 + 1
            L2 = torch.clamp(L2, min=0)
            T1_prime = memory.size(1)
            # True where position index >= valid length, i.e. padding.
            new_mask = torch.arange(T1_prime, device=memory.device).unsqueeze(0) >= L2.unsqueeze(1)
            src_key_padding_mask = new_mask

        memory = self.transformer_encoder(memory, src_key_padding_mask=src_key_padding_mask)

        B = src.size(0)
        if self.training and tgt is not None:
            T2 = tgt.size(1)
        else:
            T2 = 150  # at inference, generate a fixed-length output

        tgt_tokens = self.start_token.expand(B, T2, -1)
        tgt_tokens = self.pos_encoder(tgt_tokens)
        tgt_mask = nn.Transformer.generate_square_subsequent_mask(T2, device=src.device)

        output = self.transformer_decoder(
            tgt_tokens, memory,
            tgt_mask=tgt_mask,
            tgt_key_padding_mask=None,  # no target padding at inference
            memory_key_padding_mask=src_key_padding_mask
        )
        return self.output_proj(output)

# ----------------------------
# 2. Load the trained weights
# ----------------------------
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# input_dim=9 overrides the class default of 8 — the dataset samples below
# have 9 input channels; this must match the trained checkpoint.
model = GaitCNNTransformer(input_dim=9).to(device)
# weights_only=True: a plain state_dict holds only tensors, so the safe
# non-pickle loader suffices and avoids arbitrary-code execution on load.
model.load_state_dict(torch.load("gait_cnn_transformer.pth", map_location=device, weights_only=True))
model.eval()
print("✅ 模型加载成功！")

# ----------------------------
# 3. Load data and run inference (the downstream code is unchanged)
# ----------------------------
# NOTE(review): weights_only=False unpickles arbitrary objects — only load
# dataset files from a trusted source.
dataset = torch.load('dataset_3.pt', weights_only=False)

# Take `num_samples` consecutive samples starting at index `start_idx`.
start_idx = 300
num_samples = 100
samples = dataset[start_idx:start_idx + num_samples]

# Extract src and tgt per sample. torch.as_tensor avoids the extra copy (and
# the UserWarning) when the stored items are already tensors.
src_list = [torch.as_tensor(s[0], dtype=torch.float32) for s in samples]  # each (T1, 9)
tgt_list = [torch.as_tensor(s[1], dtype=torch.float32).squeeze(-1).cpu().numpy() for s in samples]  # each (T2,)

# Pad to a common length and stack into one batch. No padding mask is passed
# at inference — presumably padded tail positions are tolerated; TODO confirm.
src_padded = torch.nn.utils.rnn.pad_sequence(src_list, batch_first=True, padding_value=0).to(device)
# src_padded: (num_samples, max_T1, 9)

with torch.no_grad():
    pred_batch = model(src_padded)  # (num_samples, 150, 1): decoder emits fixed length 150
    pred_list = [p.squeeze(-1).cpu().numpy() for p in pred_batch]  # list of (150,)


# ----------------------------
# Overlap plot: all samples on one set of axes
# ----------------------------
plt.figure(figsize=(15, 7))
for i, (gt_curve, pred_curve) in enumerate(zip(tgt_list, pred_list)):
    # Align both curves to the shorter length before plotting.
    n = min(len(gt_curve), len(pred_curve))
    gt_curve = gt_curve[:n]
    pred_curve = pred_curve[:n]

    # Fade later samples so the overlapping curves stay readable.
    alpha = max(0.1, 0.3 - i / num_samples)
    gt_label = 'Ground Truth' if i == 0 else ""
    pred_label = 'Predicted' if i == 0 else ""
    plt.plot(gt_curve, color='blue', linewidth=0.5, marker='o', markersize=3, alpha=alpha, label=gt_label)
    plt.plot(pred_curve, color='red', linestyle='--', linewidth=0.5, marker='o', markersize=3, alpha=alpha, label=pred_label)

plt.title(f'Gait Torque Prediction vs Ground Truth (Overlap of {num_samples} Samples)')
plt.xlabel('Time Step')
plt.ylabel('Torque')
plt.grid(True, linestyle=':', alpha=0.6)
# Deduplicate legend entries so each label appears exactly once.
handles, labels = plt.gca().get_legend_handles_labels()
unique_entries = dict(zip(labels, handles))
plt.legend(unique_entries.values(), unique_entries.keys())

plt.tight_layout()
plt.savefig('torque_comparison_overlap.png', dpi=300)
plt.show()

print("✅ 图像已保存为 torque_comparison_overlap.png")


# ----------------------------
# 4. Visualization: 8 subplots
# ----------------------------
fig, axes = plt.subplots(4, 2, figsize=(15, 12))
fig.suptitle('Gait Torque Prediction vs Ground Truth (8 Samples)', fontsize=16)

# axes.flat iterates the 4x2 grid row-major, matching sample order.
for i, ax in enumerate(axes.flat):
    gt_curve = tgt_list[i]
    pred_curve = pred_list[i]

    # Truncate both series to their common length.
    n = min(len(gt_curve), len(pred_curve))
    gt_curve = gt_curve[:n]
    pred_curve = pred_curve[:n]

    ax.plot(gt_curve, label='Ground Truth', linewidth=1.8, marker='o', markersize=3)
    ax.plot(pred_curve, label='Predicted', linestyle='--', linewidth=1.8, marker='x', markersize=3)
    ax.set_title(f'Sample #{start_idx + i}')
    ax.set_xlabel('Time Step')
    ax.set_ylabel('Torque')
    ax.grid(True, linestyle=':', alpha=0.6)
    ax.legend(fontsize=9)

plt.tight_layout(rect=[0, 0, 1, 0.96])  # leave room for the suptitle
plt.savefig('torque_comparison_8_samples.png', dpi=300)
plt.show()

print("✅ 图像已保存为 torque_comparison_8_samples.png")
for i in range(8):
    print(f"Sample {start_idx+i}: True={len(tgt_list[i])}, Pred={len(pred_list[i])}")