import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from transformers import ViTConfig, ViTModel, ViTImageProcessor
from tqdm import tqdm
import numpy as np
from sklearn.metrics import f1_score, roc_auc_score
import os

# Select the compute device: GPU when available, otherwise CPU
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print(f"Using device: {device}")

# Image preprocessing: resize to the ViT input resolution and normalize
# with the pretrained processor's channel statistics
processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=processor.image_mean, std=processor.image_std),
])

# 自定义多标签数据集
class CIFAR10MultiLabel(datasets.CIFAR10):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        
    def __getitem__(self, index):
        img, target = super().__getitem__(index)
        one_hot_target = torch.zeros(10)
        one_hot_target[target] = 1
        return img, one_hot_target

# 加载数据集
train_dataset = CIFAR10MultiLabel(root='./data', train=True, download=True, transform=transform)
test_dataset = CIFAR10MultiLabel(root='./data', train=False, download=True, transform=transform)

# 创建数据加载器
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)

# 自定义ViT模型
class ViTForMultiLabelClassification(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.vit = ViTModel(config)
        self.classifier = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(config.hidden_size, 10)  # 10个类别
        )
        
    def forward(self, pixel_values):
        outputs = self.vit(pixel_values=pixel_values)
        cls_token = outputs.last_hidden_state[:, 0, :]  # [CLS] token
        logits = self.classifier(cls_token)
        return logits

# 配置并初始化模型
config = ViTConfig(
    image_size=224,
    patch_size=16,
    num_channels=3,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    initializer_range=0.02,
    layer_norm_eps=1e-12,
)
model = ViTForMultiLabelClassification(config)
model.to(device)

# 定义训练参数
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-4)
num_epochs = 10

# 训练循环
for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    
    print(f"Epoch {epoch+1}/{num_epochs}")
    progress_bar = tqdm(enumerate(train_loader), total=len(train_loader))
    
    for i, (images, labels) in progress_bar:
        images, labels = images.to(device), labels.to(device)
        
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        
        running_loss += loss.item()
        progress_bar.set_description(f"Loss: {running_loss/(i+1):.4f}")
    
    # 测试模型
    model.eval()
    all_preds = []
    all_labels = []
    
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            preds = torch.sigmoid(outputs)
            
            all_preds.append(preds.cpu().numpy())
            all_labels.append(labels.cpu().numpy())
    
    all_preds = np.concatenate(all_preds)
    all_labels = np.concatenate(all_labels)
    
    # 计算多标签指标
    f1_micro = f1_score(all_labels, all_preds > 0.5, average='micro')
    roc_auc = roc_auc_score(all_labels, all_preds, average='micro')
    
    print(f"Test F1 Micro: {f1_micro:.4f} | Test ROC AUC: {roc_auc:.4f}")

# 保存模型
if not os.path.exists('models'):
    os.makedirs('models')
torch.save(model.state_dict(), 'models/vit_cifar10_multi_label_custom.pth')
print("Model saved successfully!")