import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import torchvision.transforms as transforms
from torchvision import models
from transformers import SwinForImageClassification
import pandas as pd
import os
from PIL import Image
import torch.optim.lr_scheduler as lr_scheduler

# Let the CUDA caching allocator use expandable segments to reduce memory
# fragmentation on long runs; must be set before the first CUDA allocation.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"


class EnsembleModel(nn.Module):
    """Weighted logit-level ensemble of a Swin Transformer and EfficientNet-B7.

    The pretrained Swin backbone keeps its original classification head
    (1000 ImageNet logits); a linear layer ``fc_swin`` projects those logits
    to ``num_classes``.  EfficientNet's classifier is replaced outright with
    a fresh ``num_classes``-way linear head.  The forward pass returns
    ``weight_swin * swin_logits + weight_efficient * efficientnet_logits``.

    Args:
        num_classes: Number of target classes (default 49).
        weight_swin: Weight applied to the Swin branch logits.
        weight_efficient: Weight applied to the EfficientNet branch logits.
    """

    def __init__(self, num_classes=49, weight_swin=0.5, weight_efficient=0.5):
        super().__init__()
        self.swin = SwinForImageClassification.from_pretrained(
            "microsoft/swin-base-patch4-window7-224", ignore_mismatched_sizes=True
        )
        self.efficientnet = models.efficientnet_b7(weights="IMAGENET1K_V1")
        # Find the input width of the stock classifier so it can be replaced.
        # Initialize to None so a Sequential without any nn.Linear is caught
        # explicitly instead of raising NameError at the nn.Linear call below.
        in_features = None
        if isinstance(self.efficientnet.classifier, nn.Sequential):
            for layer in self.efficientnet.classifier:
                if isinstance(layer, nn.Linear):
                    in_features = layer.in_features
                    break
        else:
            in_features = self.efficientnet.classifier.in_features
        if in_features is None:
            raise RuntimeError(
                "No nn.Linear layer found in the EfficientNet classifier head"
            )
        self.efficientnet.classifier = nn.Linear(in_features, num_classes)
        # The Swin head still emits 1000 ImageNet logits; map to num_classes.
        self.fc_swin = nn.Linear(1000, num_classes)
        self.weight_swin = weight_swin
        self.weight_efficient = weight_efficient

    def forward(self, x):
        """Return the weighted sum of both branch logits for input batch ``x``."""
        swin_out = self.fc_swin(self.swin(x).logits)
        efficient_out = self.efficientnet(x)
        return self.weight_swin * swin_out + self.weight_efficient * efficient_out


class CustomDataset(Dataset):
    """Image dataset backed by either a CSV index or a bare image directory.

    Two modes:
      * CSV mode (``csv_file`` given): row ``idx`` supplies the image id
        (column 0) and label (column 1); the file loaded is
        ``<img_dir>/<id>.jpg``.
      * Directory mode (no CSV): images are taken from ``img_dir`` directly;
        the label is -1 (unlabeled / inference), and the id is parsed from
        the filename stem.

    Each item is the tuple ``(image, label, img_id)``.

    Args:
        csv_file: Optional path to a CSV with (id, label) columns.
        img_dir: Directory containing ``<id>.jpg`` images.
        transform: Optional callable applied to the PIL image.
    """

    def __init__(self, csv_file=None, img_dir=None, transform=None):
        self.data = pd.read_csv(csv_file) if csv_file else None
        self.img_dir = img_dir
        self.transform = transform
        # Cache a *sorted* directory listing once.  The original code called
        # os.listdir on every __len__/__getitem__, which is O(n) per item and
        # non-deterministic: listdir order is unspecified, so index->file
        # mapping could differ between runs.
        if self.data is None and img_dir is not None:
            self.file_list = sorted(os.listdir(img_dir))
        else:
            self.file_list = None

    def __len__(self):
        if self.data is not None:
            return len(self.data)
        return len(self.file_list)

    def __getitem__(self, idx):
        if self.data is not None:
            img_id = self.data.iloc[idx, 0]
            img_path = os.path.join(self.img_dir, str(img_id) + ".jpg")
            label = self.data.iloc[idx, 1]
        else:
            img_name = self.file_list[idx]
            # Filenames are assumed to be numeric ids like "123.jpg" —
            # TODO(review): confirm against the actual test-set layout.
            img_id = int(os.path.splitext(img_name)[0])
            img_path = os.path.join(self.img_dir, img_name)
            label = -1  # sentinel for "no label available"
        image = Image.open(img_path).convert("RGB")
        if self.transform:
            image = self.transform(image)
        return image, label, img_id


def get_dataloaders(batch_size=8, img_size=224, train_csv="Art/train.csv", val_csv="Art/train.csv"):
    """Build training and validation DataLoaders over the Art dataset.

    Training uses augmentation (random flip + color jitter); validation uses
    a deterministic resize+normalize pipeline.  The original code applied the
    augmenting transform to validation as well, which makes the reported
    validation accuracy stochastic from epoch to epoch.

    NOTE(review): both defaults point at the same CSV, so train and val see
    identical data (no held-out split) — confirm this is intentional.

    Args:
        batch_size: Batch size for both loaders.
        img_size: Square resize target in pixels.
        train_csv: CSV index for the training set.
        val_csv: CSV index for the validation set.

    Returns:
        (train_loader, val_loader) tuple of DataLoaders.
    """
    # ImageNet channel statistics, matching both pretrained backbones.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.Resize((img_size, img_size)),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
        transforms.ToTensor(),
        normalize,
    ])
    val_transform = transforms.Compose([
        transforms.Resize((img_size, img_size)),
        transforms.ToTensor(),
        normalize,
    ])
    train_dataset = CustomDataset(train_csv, "Art/train", transform=train_transform)
    val_dataset = CustomDataset(val_csv, "Art/train", transform=val_transform)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
    return train_loader, val_loader


def train(model, train_loader, criterion, optimizer, epoch, print_freq=10, accumulation_steps=2):
    """Run one training epoch with mixed precision and gradient accumulation.

    Args:
        model: Model already placed on the GPU.
        train_loader: DataLoader yielding (images, labels, img_id) triples.
        criterion: Loss function (e.g. CrossEntropyLoss).
        optimizer: Optimizer over ``model.parameters()``.
        epoch: Zero-based epoch index (used only for logging).
        print_freq: Log every ``print_freq`` steps.
        accumulation_steps: Number of micro-batches per optimizer step.
    """
    model.train()
    scaler = torch.cuda.amp.GradScaler()
    # Start from clean gradients so nothing leaks in from a previous epoch
    # (the original relied on the *previous* loop iteration's zero_grad).
    optimizer.zero_grad()
    num_batches = len(train_loader)
    for i, (images, labels, _) in enumerate(train_loader):
        images, labels = images.cuda(), labels.cuda()
        with torch.cuda.amp.autocast():
            outputs = model(images)
            loss = criterion(outputs, labels)
        # Scale down so the accumulated gradient matches one full-batch step.
        scaler.scale(loss / accumulation_steps).backward()
        # Step on each accumulation boundary AND on the last batch, so the
        # tail gradients are not silently dropped when
        # num_batches % accumulation_steps != 0.
        if (i + 1) % accumulation_steps == 0 or (i + 1) == num_batches:
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
        if i % print_freq == 0:
            # Log the true (unscaled) batch loss; the original printed the
            # accumulation-scaled value.
            print(f"Epoch [{epoch + 1}], Step [{i + 1}/{num_batches}], Loss: {loss.item():.4f}")
    # NOTE: the per-iteration torch.cuda.empty_cache() was removed — it
    # forces costly allocator synchronization every step with no benefit;
    # expandable_segments already mitigates fragmentation.


def validate(model, val_loader, criterion):
    """Evaluate ``model`` on ``val_loader`` and return accuracy in percent.

    Prints mean validation loss and accuracy.  Guards against an empty
    loader (the original divided by zero in that case).

    Args:
        model: Model already placed on the GPU.
        val_loader: DataLoader yielding (images, labels, img_id) triples.
        criterion: Loss function used for the reported validation loss.

    Returns:
        Accuracy as a float percentage in [0, 100].
    """
    model.eval()
    correct, total, val_loss = 0, 0, 0.0
    with torch.no_grad():
        for images, labels, _ in val_loader:
            images, labels = images.cuda(), labels.cuda()
            outputs = model(images)
            val_loss += criterion(outputs, labels).item()
            # argmax over the class dimension; avoids the legacy `.data`
            # attribute, which bypasses autograd's correctness checks.
            predicted = outputs.argmax(dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    # Avoid ZeroDivisionError on an empty validation set.
    accuracy = 100 * correct / total if total > 0 else 0.0
    mean_loss = val_loss / len(val_loader) if len(val_loader) > 0 else 0.0
    print(f"Validation Loss: {mean_loss:.4f}, Accuracy: {accuracy:.2f}%")
    return accuracy


def main():
    """Train the ensemble for a fixed number of epochs, checkpointing the
    best validation accuracy to ``best_model.pth``."""
    # Hyperparameters.
    epochs = 4
    bs = 8
    lr = 1e-4
    wd = 1e-4
    w_swin, w_eff = 0.5, 0.5

    # Model, loss, optimizer, cosine LR schedule over the full run.
    net = EnsembleModel(weight_swin=w_swin, weight_efficient=w_eff).cuda()
    loss_fn = nn.CrossEntropyLoss()
    opt = optim.AdamW(net.parameters(), lr=lr, weight_decay=wd)
    sched = lr_scheduler.CosineAnnealingLR(opt, T_max=epochs)
    loader_train, loader_val = get_dataloaders(batch_size=bs)

    best_acc = 0.0
    for ep in range(epochs):
        train(net, loader_train, loss_fn, opt, ep)
        acc = validate(net, loader_val, loss_fn)
        sched.step()
        # Keep only the best-scoring weights on disk.
        if acc > best_acc:
            best_acc = acc
            torch.save(net.state_dict(), "best_model.pth")
        torch.cuda.empty_cache()
    print("训练完成，最佳验证准确率: {:.2f}%".format(best_acc))


if __name__ == "__main__":
    main()
