import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
from torch.nn import TransformerEncoder, TransformerEncoderLayer
import math
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

# Prefer the GPU when one is available; the model and all batches below are
# moved onto this module-level device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Custom Dataset class (adds sequence-length calibration)
class ECGDataset(Data.Dataset):
    """Dataset of fixed-length ECG heartbeat sequences.

    Expects a DataFrame with:
      - 'heartbeat_signals': one comma-separated string of floats per row
      - 'label': integer (or integer-valued) class label

    Every sequence is zero-padded or truncated to exactly ``seq_len`` samples.
    """

    def __init__(self, df, seq_len=187):
        """Parse signals from `df` and normalize them to length `seq_len`."""
        self.seq_len = seq_len
        # Parse each comma-separated signal string into a float32 vector.
        parsed = df['heartbeat_signals'].apply(
            lambda x: np.array(list(map(float, x.split(','))), dtype=np.float32)
        )
        # Pad short sequences with trailing zeros, truncate long ones, and
        # stack once into a single [N, seq_len] float32 matrix (the original
        # built an intermediate object array and converted twice).
        self.features = np.stack([
            np.pad(f, (0, seq_len - len(f)), 'constant', constant_values=0)
            if len(f) < seq_len else f[:seq_len]
            for f in parsed
        ]).astype(np.float32)
        # int64 so default_collate yields torch.long, which CrossEntropyLoss
        # requires for targets (plain int can collate as int32 on Windows).
        self.labels = df['label'].astype(np.int64).to_numpy()

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        # Returns (signal[seq_len] float32, label int64).
        return self.features[idx], self.labels[idx]

# Data-loading helper (performs the three-way train/val/test split)
def load_data(batch_size, test_size=0.2, val_size=0.125, csv_path='/mnt/ECG-GPT/train.csv'):
    """Load the ECG CSV and return (train, val, test) DataLoaders.

    The data is split in two stratified stages: first into (train+val) vs.
    test by `test_size`, then the remainder into train vs. val by `val_size`
    (defaults give roughly 70% / 10% / 20% overall).

    Args:
        batch_size: mini-batch size for all three loaders.
        test_size: fraction held out for the test set.
        val_size: fraction of the remaining data held out for validation.
        csv_path: path to the training CSV. Defaults to the previously
            hard-coded location, so existing callers are unaffected.

    Returns:
        (train_loader, val_loader, test_loader)
    """
    df_all = pd.read_csv(csv_path)

    # Stage 1: carve off the test set (stratified on the label column).
    df_full_train, df_test = train_test_split(
        df_all, test_size=test_size, random_state=42, stratify=df_all['label']
    )

    # Stage 2: split the remainder into training and validation sets.
    df_train_part, df_val = train_test_split(
        df_full_train, test_size=val_size, random_state=42,
        stratify=df_full_train['label']
    )

    train_dataset = ECGDataset(df_train_part)
    val_dataset = ECGDataset(df_val)
    test_dataset = ECGDataset(df_test)

    # drop_last keeps training batch sizes uniform; pin_memory speeds up
    # host-to-GPU transfer when CUDA is used.
    train_loader = Data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=2,
        pin_memory=True,
        drop_last=True
    )

    val_loader = Data.DataLoader(
        dataset=val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=2,
        pin_memory=True
    )

    test_loader = Data.DataLoader(
        dataset=test_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=2,
        pin_memory=True
    )

    return train_loader, val_loader, test_loader

# Visualization helper (the R-squared curve was removed)
def plot_training_curves(loss_history, train_acc_history, val_acc_history):
    """Render training loss, training accuracy and validation accuracy
    as three side-by-side subplots, one point per epoch."""
    xs = range(1, len(loss_history) + 1)
    # (series, line color, title/legend text, y-axis label) per panel.
    panels = [
        (loss_history, 'b', 'Training Loss', 'Loss'),
        (train_acc_history, 'b', 'Training Accuracy', 'Accuracy'),
        (val_acc_history, 'g', 'Validation Accuracy', 'Accuracy'),
    ]

    plt.figure(figsize=(12, 5))
    for pos, (series, color, title, ylabel) in enumerate(panels, start=1):
        plt.subplot(1, 3, pos)
        plt.plot(xs, series, color, label=title)
        plt.title(title)
        plt.xlabel('Epochs')
        plt.ylabel(ylabel)
        plt.legend()

    plt.tight_layout()
    plt.show()

# Classification model based on a Transformer encoder
class ECGTransformerClassifier(nn.Module):
    """Transformer-encoder classifier for fixed-length 1-D ECG signals.

    Each scalar sample of the signal is treated as one token: it is projected
    to d_model, position-encoded, run through a stack of Transformer encoder
    layers, mean-pooled over time, and classified with a small MLP head.
    """

    def __init__(self, input_dim=187, d_model=256, nhead=8, num_layers=4, 
                dim_feedforward=512, output_dim=4, dropout=0.1):
        """
        Args:
            input_dim: nominal sequence length (kept for interface
                compatibility; forward works for any length).
            d_model: token embedding / encoder width.
            nhead: number of attention heads (must divide d_model).
            num_layers: number of stacked encoder layers.
            dim_feedforward: hidden width of each layer's feed-forward net.
            output_dim: number of target classes.
            dropout: dropout rate used throughout.
        """
        super().__init__()
        self.d_model = d_model

        # Project each scalar time step to a d_model-dimensional token.
        self.embedding = nn.Linear(1, d_model)
        self.pos_encoder = PositionalEncoding(d_model, dropout)

        # batch_first=True keeps activations as [B, L, d_model] end to end,
        # removing the [L, B, d_model] transpose round-trip of the original.
        encoder_layer = TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True
        )
        self.transformer_encoder = TransformerEncoder(encoder_layer, num_layers)

        # Classification head applied to the pooled representation.
        self.fc_out = nn.Sequential(
            nn.Linear(d_model, d_model//2),
            nn.ReLU(),
            nn.LayerNorm(d_model//2),
            nn.Linear(d_model//2, output_dim)
        )

        self._reset_parameters()

    def _reset_parameters(self):
        """Xavier-initialize every weight matrix (vectors are left as-is)."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src):
        """
        Forward pass.
        Args:
            src: float tensor of shape [batch_size, seq_len].
        Returns:
            Class logits of shape [batch_size, output_dim].
        """
        # [B, L] -> [B, L, 1] -> [B, L, d_model]; scale as in Vaswani et al.
        src = self.embedding(src.unsqueeze(-1)) * math.sqrt(self.d_model)

        # Add sinusoidal positional encoding (also applies dropout).
        src = self.pos_encoder(src)              # [B, L, d_model]

        # Encode; batch_first means no transpose is needed.
        memory = self.transformer_encoder(src)   # [B, L, d_model]

        # Global average pooling over the time axis, then classify.
        return self.fc_out(memory.mean(dim=1))   # [B, output_dim]

# Sinusoidal positional-encoding implementation
class PositionalEncoding(nn.Module):
    """Adds the fixed sinusoidal position encoding of Vaswani et al. (2017),
    followed by dropout."""

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Precompute the [max_len, d_model] table: sin on even channels,
        # cos on odd channels, with geometrically spaced frequencies.
        positions = torch.arange(max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # Buffer, not parameter: moves with .to(device) and is saved in
        # state_dict, but is never trained. Shape: [1, max_len, d_model].
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x):
        """x: [B, L, d_model] -> same shape with positions added (broadcast
        over the batch dimension), then dropout."""
        return self.dropout(x + self.pe[:, :x.size(1), :])

# Evaluation helper
def evaluate_model(model, data_loader, criterion):
    """Evaluate `model` over `data_loader`; returns (mean loss, accuracy).

    Runs in eval mode with gradients disabled. The device is derived from
    the model's own parameters rather than a module-level global, so the
    function works wherever the model currently lives.

    Args:
        model: classifier producing [B, num_classes] logits.
        data_loader: yields (inputs, integer labels) batches.
        criterion: loss taking (logits, labels), e.g. CrossEntropyLoss.

    Returns:
        (epoch_loss, epoch_acc) as Python floats, averaged over the
        whole dataset.
    """
    model.eval()
    # Infer the evaluation device from the model itself (was a global).
    eval_device = next(model.parameters()).device
    total_loss = 0.0
    total_correct = 0

    with torch.no_grad():
        for inputs, labels in tqdm(data_loader, desc="Evaluating", leave=False):
            inputs = inputs.to(eval_device, non_blocking=True)
            labels = labels.to(eval_device, non_blocking=True)

            outputs = model(inputs)
            loss = criterion(outputs, labels)

            preds = outputs.argmax(dim=1)
            # Weight the batch loss by batch size so the dataset mean is
            # exact even when the final batch is smaller.
            total_loss += loss.item() * inputs.size(0)
            # Accumulate as a plain int instead of a tensor.
            total_correct += (preds == labels).sum().item()

    n = len(data_loader.dataset)
    return total_loss / n, total_correct / n

# Training loop (with per-epoch validation and periodic testing)
def train_model(model, train_loader, val_loader, test_loader, criterion, optimizer, scheduler, num_epochs):
    """Train with per-epoch validation plus periodic checkpointing/testing.

    Side effects:
      - saves a checkpoint and evaluates on the test set every 15 epochs;
      - saves './best_model.pth' whenever validation accuracy reaches a new
        best — checked EVERY epoch (in the original this check was nested
        inside the 15-epoch branch, so the true best epoch could be missed).

    Returns:
        Dict with 'loss_history', 'train_acc_history', 'val_acc_history'
        (one entry per epoch) and 'test_results' (one entry per checkpoint).
    """
    model = model.to(device)
    best_val_acc = 0.0

    # Metric histories collected over the run.
    loss_history = []
    train_acc_history = []
    val_acc_history = []
    test_results = []

    for epoch in range(num_epochs):
        # ---- training phase ----
        model.train()
        running_loss = 0.0
        running_corrects = 0
        progress_bar = tqdm(train_loader, desc=f"Epoch {epoch+1}/{num_epochs}")

        for inputs, labels in progress_bar:
            inputs = inputs.to(device, non_blocking=True)
            labels = labels.to(device, non_blocking=True)

            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            _, preds = torch.max(outputs, 1)
            # Weight by batch size so the epoch mean is exact.
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels)

            progress_bar.set_postfix(loss=loss.item())

        # Epoch-level training metrics.
        epoch_loss = running_loss / len(train_loader.dataset)
        epoch_acc = running_corrects.double() / len(train_loader.dataset)
        loss_history.append(epoch_loss)
        train_acc_history.append(epoch_acc.item())

        # ---- validation phase (evaluate_model switches to eval mode) ----
        val_loss, val_acc = evaluate_model(model, val_loader, criterion)
        val_acc_history.append(val_acc)

        print(f'Epoch {epoch+1}/{num_epochs} | '
              f'Train Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f} | '
              f'Val Loss: {val_loss:.4f} Acc: {val_acc:.4f}')

        # Track the best validation model every epoch (bug fix: this was
        # previously only evaluated on checkpoint epochs).
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), './best_model.pth')
            print("已更新最佳模型")

        # Periodic checkpoint + test-set evaluation.
        if (epoch + 1) % 15 == 0:
            save_path = f'./ecg_transformer_classifier_epoch_{epoch+1}_val_acc_{val_acc:.4f}.pth'
            torch.save(model.state_dict(), save_path)
            print(f"模型已保存到 {save_path}")

            test_loss, test_acc = evaluate_model(model, test_loader, criterion)
            test_results.append({
                'epoch': epoch+1,
                'loss': test_loss,
                'acc': test_acc
            })
            print(f'测试集评估结果 - Loss: {test_loss:.4f} Acc: {test_acc:.4f}')

        scheduler.step()

    return {
        'loss_history': loss_history,
        'train_acc_history': train_acc_history,
        'val_acc_history': val_acc_history,
        'test_results': test_results
    }

# Main entry point
def main():
    """Entry point: build the data pipeline, train, plot, and save results."""
    # Hyperparameters.
    batch_size = 64
    learning_rate = 0.0001
    num_epochs = 1200

    # Data loaders with the train/val/test three-way split.
    train_loader, val_loader, test_loader = load_data(batch_size)

    # Model and loss.
    model = ECGTransformerClassifier().to(device)
    criterion = nn.CrossEntropyLoss()

    # Per-group learning rates: the classification head learns twice as fast.
    # BUG FIX: the original groups omitted model.embedding, so the input
    # projection was frozen at its random init and never trained.
    optimizer = optim.Adam([
        {'params': model.embedding.parameters(), 'lr': learning_rate},
        {'params': model.transformer_encoder.parameters(), 'lr': learning_rate},
        {'params': model.fc_out.parameters(), 'lr': learning_rate*2}
    ])

    # Cosine-annealing LR schedule over the full run.
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)

    # Train (also handles validation, checkpointing and periodic testing).
    metrics = train_model(
        model=model,
        train_loader=train_loader,
        val_loader=val_loader,
        test_loader=test_loader,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        num_epochs=num_epochs
    )

    # Plot loss / accuracy curves.
    plot_training_curves(
        metrics['loss_history'],
        metrics['train_acc_history'],
        metrics['val_acc_history']
    )

    # Report the best validation accuracy seen during training.
    print(f"最佳验证集准确率: {max(metrics['val_acc_history']):.4f}")

    # Save the final weights, tagged with the most recent test accuracy.
    if metrics['test_results']:
        final_test_metrics = metrics['test_results'][-1]
        final_save_path = f'./ecg_transformer_classifier_final_test_acc_{final_test_metrics["acc"]:.4f}.pth'
        torch.save(model.state_dict(), final_save_path)
        print(f"最终模型已保存到 {final_save_path}")

if __name__ == '__main__':
    main()