import os
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, cohen_kappa_score, hamming_loss
from torch.cuda.amp import GradScaler, autocast
import numpy as np
import timm  # PyTorch Image Models库

# Data preprocessing. EfficientNet-B4's recommended input size is 380x380.
# Augmentation is applied ONLY to the training set; validation gets a
# deterministic resize + normalize so evaluation metrics are reproducible.
transform = transforms.Compose([
    transforms.Resize((380, 380)),  # EfficientNet-B4 recommended size
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.RandomRotation(45),
    transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),
    transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1),
    transforms.RandomPerspective(distortion_scale=0.2, p=0.5),  # perspective distortion
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Deterministic pipeline for validation: no random augmentation, otherwise
# every validation pass would score a different, distorted view of the data.
val_transform = transforms.Compose([
    transforms.Resize((380, 380)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Dataset paths
data_dir = r'data'
train_data_path = os.path.join(data_dir, 'train')
val_data_path = os.path.join(data_dir, 'val')

# Load datasets (ImageFolder infers class labels from sub-directory names)
train_dataset = datasets.ImageFolder(root=train_data_path, transform=transform)
# FIX: was using the augmented training `transform` for validation
val_dataset = datasets.ImageFolder(root=val_data_path, transform=val_transform)

# Data loaders
batch_size = 8  # RTX 3060 may support a larger batch size; tune as needed
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)

# Build EfficientNet-B4 from timm with ImageNet-pretrained weights.
model = timm.create_model('tf_efficientnet_b4', pretrained=True)

# Replace the stock classifier with a Dropout + Linear head sized to our classes.
num_classes = len(train_dataset.classes)
feature_dim = model.classifier.in_features
head = nn.Linear(feature_dim, num_classes)
nn.init.xavier_uniform_(head.weight)  # Xavier init for the freshly created head
head.bias.data.fill_(0.01)
model.classifier = nn.Sequential(
    nn.Dropout(0.5),  # dropout regularization before the final projection
    head
)

# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Balanced class weights to counter class imbalance in the loss.
from sklearn.utils.class_weight import compute_class_weight
weights_np = compute_class_weight('balanced', classes=np.unique(train_dataset.targets), y=train_dataset.targets)
class_weights = torch.as_tensor(weights_np, dtype=torch.float, device=device)

# Loss, optimizer and learning-rate schedule.
criterion = nn.CrossEntropyLoss(weight=class_weights)
# Small LR + strong weight decay: conservative fine-tuning of a pretrained backbone.
optimizer = optim.AdamW(model.parameters(), lr=5e-5, weight_decay=1e-3)
# Cosine annealing with warm restarts every 5 epochs, floored at 1e-6.
scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=5, T_mult=1, eta_min=1e-6)

# Gradient scaler for mixed-precision training.
scaler = GradScaler()

# Early-stopping state: stop once validation accuracy fails to improve for
# `early_stopping_patience` consecutive epochs (updated inside validate()).
early_stopping_patience = 5
best_val_acc = 0
epochs_without_improvement = 0

# Training function
def train(model, device, train_loader, criterion, optimizer, epoch, scaler):
    """Run one training epoch with mixed-precision (AMP) updates.

    Args:
        model: network being trained (already moved to `device`).
        device: torch.device batches are moved to.
        train_loader: DataLoader yielding (data, target) batches.
        criterion: loss function applied to model output.
        optimizer: optimizer stepping the model parameters.
        epoch: current epoch number (used for logging only).
        scaler: torch.cuda.amp.GradScaler handling loss scaling.

    Returns:
        (avg_loss, accuracy): mean batch loss and epoch training accuracy
        in percent. (Previously nothing was returned, so the caller could
        not record train-set metrics.)
    """
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0

    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()

        with autocast():
            output = model(data)
            loss = criterion(output, target)

        # Scale the loss to avoid fp16 gradient underflow; scaler unscales
        # before the optimizer step and adapts the scale factor each update.
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()

        running_loss += loss.item()
        _, predicted = output.max(1)
        total += target.size(0)
        correct += predicted.eq(target).sum().item()

        if batch_idx % 10 == 0:
            # `total` counts samples actually processed, so the progress figure
            # is exact even on a short final batch (the old `batch_idx * len(data)`
            # was off whenever the last batch was smaller).
            print(f'Train Epoch: {epoch} [{total}/{len(train_loader.dataset)} ({100. * batch_idx / len(train_loader):.0f}%)] '
                  f'Loss: {loss.item():.6f} | Acc: {100.*correct/total:.3f}%')

    # Guard against an empty loader so the script fails loudly elsewhere, not here.
    avg_loss = running_loss / max(len(train_loader), 1)
    accuracy = 100. * correct / max(total, 1)
    return avg_loss, accuracy

# Validation function
def validate(model, device, val_loader, criterion):
    """Evaluate on the validation set, report metrics, and drive early stopping.

    Side effects: updates the module-level early-stopping state
    (best_val_acc, epochs_without_improvement) and checkpoints the best
    model weights to 'best_efficientnet_b4_model.pth'.

    Returns:
        dict with keys loss/accuracy/precision/recall/f1/kappa/hamming.
    """
    global best_val_acc, epochs_without_improvement  # module-level early-stop state

    model.eval()
    total_loss = 0.0
    targets, predictions = [], []

    with torch.no_grad():
        for batch, labels in val_loader:
            batch, labels = batch.to(device), labels.to(device)
            logits = model(batch)
            total_loss += criterion(logits, labels).item()
            targets.extend(labels.cpu().numpy())
            predictions.extend(logits.max(1)[1].cpu().numpy())

    avg_loss = total_loss / len(val_loader)

    # Macro averages treat every class equally; zero_division=0 keeps sklearn
    # from raising on classes with no predicted samples.
    metrics = {
        'loss': avg_loss,
        'accuracy': accuracy_score(targets, predictions),
        'precision': precision_score(targets, predictions, average='macro', zero_division=0),
        'recall': recall_score(targets, predictions, average='macro', zero_division=0),
        'f1': f1_score(targets, predictions, average='macro', zero_division=0),
        'kappa': cohen_kappa_score(targets, predictions),
        'hamming': hamming_loss(targets, predictions)
    }

    print(f"\nValidation Metrics:")
    for name, value in metrics.items():
        print(f"{name.capitalize()}: {value:.4f}")

    # Early-stopping bookkeeping: checkpoint on improvement, otherwise count
    # a stalled epoch.
    if metrics['accuracy'] > best_val_acc:
        best_val_acc = metrics['accuracy']
        torch.save(model.state_dict(), 'best_efficientnet_b4_model.pth')
        epochs_without_improvement = 0
    else:
        epochs_without_improvement += 1

    return metrics

# Main training workflow
def main():
    """Run the staged fine-tuning loop with early stopping and plot metric curves."""
    try:
        epochs = 50  # upper bound; early stopping usually ends training sooner

        print(f"当前训练使用的设备是: {device}")
        print(f"训练集大小: {len(train_dataset)} | 验证集大小: {len(val_dataset)}")

        history = {'loss': [], 'accuracy': [], 'precision': [], 'recall': [], 'f1': [], 'kappa': [], 'hamming': []}

        # Staged training strategy
        for epoch in range(1, epochs + 1):
            torch.cuda.empty_cache()

            # Keep the first three backbone blocks frozen for the first 5
            # epochs so the freshly-initialized classifier head settles before
            # the early features are disturbed.
            freeze = epoch <= 5
            for param in model.blocks[0:3].parameters():
                param.requires_grad = not freeze

            train(model, device, train_loader, criterion, optimizer, epoch, scaler)
            metrics = validate(model, device, val_loader, criterion)

            for name in history.keys():
                history[name].append(metrics[name])

            scheduler.step()

            # `validate` updates the module-level counter; stop when patience
            # is exhausted.
            if epochs_without_improvement >= early_stopping_patience:
                print(f"验证集准确率连续{early_stopping_patience}个epoch没有提高，停止训练。")
                break

        # Plot six tracked metrics in a 2x3 grid ('hamming' is recorded in
        # history but intentionally not plotted — the grid only fits six).
        plt.figure(figsize=(15, 10))
        for i, metric in enumerate(['loss', 'accuracy', 'precision', 'recall', 'f1', 'kappa'], 1):
            plt.subplot(2, 3, i)
            plt.plot(history[metric])
            plt.title(metric.capitalize())
            plt.xlabel('Epoch')
            plt.ylabel('Score' if metric != 'loss' else 'Loss')

        plt.tight_layout()
        plt.savefig('efficientnet_b4_training_metrics1.png')
        plt.show()

    except Exception as e:
        # Print the full traceback, not just the message: a bare print(e)
        # hid the failing line, making errors deep in the loop undebuggable.
        import traceback
        traceback.print_exc()
        print(f"发生错误: {e}")

if __name__ == '__main__':
    main()