import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
from tqdm import tqdm  # Ensure correct import
import math

# Device configuration: use the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 1. Synthetic data generation
class CurveDataset(Dataset):
    """Sliding-window dataset over a spiral curve.

    The curve is x(t) = 0.5*t*cos(t), y(t) = 0.5*t*sin(t) for t in [0, 4*pi].
    Each sample is a length-`seq_length` window of x values (input) paired with
    the corresponding window of y values (target). Both are globally normalized
    to zero mean / unit std; the statistics are kept as attributes so inference
    can reuse them.
    """

    def __init__(self, seq_length=10, num_samples=1000):
        t = np.linspace(0, 4 * np.pi, num_samples + seq_length)
        x = 0.5 * t * np.cos(t)
        y = 0.5 * t * np.sin(t)

        # Stack the overlapping windows into one ndarray before the tensor
        # conversion: torch.tensor on a list of numpy arrays takes a slow
        # element-by-element path (and warns on recent PyTorch versions).
        windows_x = np.stack([x[i:i + seq_length] for i in range(num_samples)])
        windows_y = np.stack([y[i:i + seq_length] for i in range(num_samples)])

        self.X = torch.tensor(windows_x, dtype=torch.float32).unsqueeze(-1)
        self.Y = torch.tensor(windows_y, dtype=torch.float32).unsqueeze(-1)

        # Global normalization over the whole dataset.
        self.X_mean = self.X.mean()
        self.X_std = self.X.std()
        self.X = (self.X - self.X_mean) / self.X_std

        self.Y_mean = self.Y.mean()
        self.Y_std = self.Y.std()
        self.Y = (self.Y - self.Y_mean) / self.Y_std

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        # NOTE(review): moving tensors to `device` per item blocks the use of
        # num_workers>0 / pin_memory in the DataLoader; kept for compatibility
        # with the existing training loop.
        return self.X[idx].to(device), self.Y[idx].to(device)

# 2. Positional encoding module
class PositionalEncoding(nn.Module):
    """Adds the fixed sinusoidal positional table of Vaswani et al. to a
    batch-first sequence of shape (batch, seq_len, d_model)."""

    def __init__(self, d_model, max_len=5000):
        super().__init__()
        # Frequencies decay geometrically across the even feature indices.
        freqs = torch.exp(-math.log(10000.0) / d_model * torch.arange(0, d_model, 2))
        positions = torch.arange(max_len).unsqueeze(1)
        angles = positions * freqs
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(angles)
        table[:, 1::2] = torch.cos(angles)
        # Registered as a (non-trainable) buffer so it moves with the module
        # across devices and is excluded from the optimizer.
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x):
        # Slice the precomputed table to the input's sequence length.
        return x + self.pe[:, :x.size(1)]

# 3. Transformer model architecture
class CurveTransformer(nn.Module):
    """Transformer encoder mapping an x-coordinate sequence to y-coordinates.

    Input:  (batch, seq_len, input_dim)
    Output: (batch, seq_len, 1) — one regressed y value per position.
    """

    def __init__(self, input_dim=1, d_model=128, nhead=8, num_layers=4, dim_feedforward=512):
        super().__init__()
        self.input_proj = nn.Linear(input_dim, d_model)
        self.pos_encoder = PositionalEncoding(d_model)
        layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=0.1,
            batch_first=True,
        )
        self.transformer = nn.TransformerEncoder(layer, num_layers=num_layers)
        self.output_layer = nn.Linear(d_model, 1)

    def forward(self, src):
        # Project scalar inputs to model width, add position information,
        # run the encoder stack, then regress one y value per time step.
        hidden = self.pos_encoder(self.input_proj(src))
        return self.output_layer(self.transformer(hidden))

# 4. Training configuration
def train_model():
    # 数据集参数
    SEQ_LENGTH = 20
    NUM_SAMPLES = 5000
    BATCH_SIZE = 64
    
    # 初始化数据集和数据加载器
    dataset = CurveDataset(seq_length=SEQ_LENGTH, num_samples=NUM_SAMPLES)
    dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
    
    # 模型参数
    model = CurveTransformer(
        input_dim=1,
        d_model=256,  # 增大隐藏层维度
        nhead=8,
        num_layers=6,  # 增加层数
        dim_feedforward=1024
    ).to(device)

    
    # 训练参数
    criterion = nn.MSELoss()  # 使用均方误差损失
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
    NUM_EPOCHS = 50

    # 训练循环
    for epoch in tqdm(range(NUM_EPOCHS)):
        model.train()
        total_loss = 0
        for inputs, targets in dataloader:
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        scheduler.step()
        avg_loss = total_loss / len(dataloader)
        print(f"Epoch [{epoch+1}/{NUM_EPOCHS}], Loss: {avg_loss:.6f}")
    return model

# 5. Train the model and persist its weights (note: runs at import time).
trained_model = train_model()
torch.save(trained_model.state_dict(), "curve_transformer.pth")

# 6. Inference demo
def inference(model, dataset, seq_length=20):
    """Predict y values for an x window and print them against ground truth.

    The input x window is normalized with the training dataset's statistics
    (matching what the model saw during training), and both the prediction and
    the true y values are compared on the ORIGINAL (un-normalized) scale.

    Args:
        model: trained CurveTransformer (or compatible nn.Module).
        dataset: CurveDataset providing X_mean/X_std/Y_mean/Y_std statistics.
        seq_length: length of the test window.
    """
    model.eval()
    with torch.no_grad():
        # Sample the tail end of the training t-range.
        # NOTE(review): the fixed 0.1 step differs from the training grid
        # spacing, so this window is sampled more coarsely than the training
        # data — confirm this is intended.
        test_t = np.linspace(4 * np.pi - seq_length * 0.1, 4 * np.pi, seq_length)
        test_x = 0.5 * test_t * np.cos(test_t)
        test_y = 0.5 * test_t * np.sin(test_t)

        # Normalize the input with the TRAINING statistics.
        input_seq = (torch.tensor(test_x, dtype=torch.float32).unsqueeze(0).unsqueeze(-1) - dataset.X_mean) / dataset.X_std
        input_seq = input_seq.to(device)

        prediction = model(input_seq).squeeze(-1).squeeze(0).cpu().numpy()
        # De-normalize the prediction back to the raw y scale so it is directly
        # comparable to the raw ground truth. (The original code de-normalized
        # the prediction but normalized the truth, printing two incompatible
        # scales under a "normalized" label.)
        prediction = prediction * dataset.Y_std.cpu().numpy() + dataset.Y_mean.cpu().numpy()

        print("\n预测与真实值对比（原始尺度）:")
        print("预测y:", prediction)
        print("真实y:", test_y)


# Load the trained model and run the inference demo.
loaded_model = CurveTransformer(
    input_dim=1,          # must match the training configuration
    d_model=256,          # hidden width used by train_model()
    nhead=8,              # attention heads used by train_model()
    num_layers=6,         # encoder depth used by train_model()
    dim_feedforward=1024  # feed-forward width used by train_model()
).to(device)
# map_location keeps the checkpoint loadable on CPU-only machines even if it
# was saved from a CUDA run.
loaded_model.load_state_dict(torch.load("curve_transformer.pth", map_location=device))
# Rebuild the dataset with the SAME parameters train_model() used
# (seq_length=20, num_samples=5000). The original passed num_samples=50000,
# which produces normalization statistics different from those the model was
# trained with, skewing the inference inputs and outputs.
inference(loaded_model, dataset=CurveDataset(seq_length=20, num_samples=5000))

