import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, WeightedRandomSampler
from torchvision import transforms, models
import os
from PIL import Image
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter


# 自定义数据集类
class NaturalImagesDataset(Dataset):
    """Image-folder dataset: one sub-directory per class under ``data_dir``.

    Classes are sorted alphabetically and mapped to contiguous integer
    labels. Image filenames are also sorted so the sample index order is
    deterministic across separate instances — the training code splits and
    weights by index across independent scans of the same directory, so a
    filesystem-dependent ``os.listdir`` order could silently misalign
    labels, weights, and transforms.

    Args:
        data_dir: root directory containing one folder per class.
        transform: optional callable applied to each loaded PIL image.
        augment: kept for backward compatibility; unused here (augmentation
            is handled by the transform pipeline).
    """

    def __init__(self, data_dir, transform=None, augment=False):
        self.data_dir = data_dir
        self.transform = transform
        self.augment = augment
        self.images = []
        self.labels = []
        self.class_to_idx = {}
        self.idx_to_class = {}

        # Sorted class folders -> stable class -> label assignment.
        classes = sorted(
            d for d in os.listdir(data_dir)
            if os.path.isdir(os.path.join(data_dir, d))
        )

        for idx, class_name in enumerate(classes):
            self.class_to_idx[class_name] = idx
            self.idx_to_class[idx] = class_name

            class_dir = os.path.join(data_dir, class_name)
            # Sort filenames: os.listdir order is filesystem-dependent, and
            # callers align sample indices across separate dataset scans.
            for img_name in sorted(os.listdir(class_dir)):
                if img_name.lower().endswith(('.png', '.jpg', '.jpeg')):
                    self.images.append(os.path.join(class_dir, img_name))
                    self.labels.append(idx)

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        img_path = self.images[idx]
        image = Image.open(img_path).convert('RGB')
        label = self.labels[idx]

        if self.transform:
            image = self.transform(image)

        # Path is returned alongside the sample for debugging/tracing.
        return image, label, img_path


# 改进的AlexNet模型
class ImprovedAlexNet(nn.Module):
    """AlexNet-style CNN (local-response-norm variant) with configurable dropout.

    The adaptive average pool fixes the classifier input at 256*6*6, so the
    spatial input size does not have to be exactly 227x227. Layer ordering
    matches the classic AlexNet layout, and the submodule names/indices
    (``features.N``, ``classifier.N``) are kept checkpoint-compatible.

    Args:
        num_classes: size of the final logit layer.
        dropout_rate: dropout probability used in both classifier dropouts.
    """

    def __init__(self, num_classes=8, dropout_rate=0.5):
        super(ImprovedAlexNet, self).__init__()

        conv_layers = []
        # Stage 1: 3 -> 96 channels, large stride-4 kernel, LRN + overlapping pool.
        conv_layers += [
            nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        # Stage 2: 96 -> 256 channels.
        conv_layers += [
            nn.Conv2d(96, 256, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        # Stage 3: three 3x3 convolutions, then the final pool.
        conv_layers += [
            nn.Conv2d(256, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*conv_layers)

        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))

        head_layers = [
            nn.Dropout(p=dropout_rate),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=dropout_rate),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*head_layers)

    def forward(self, x):
        feature_maps = self.features(x)
        pooled = self.avgpool(feature_maps)
        flattened = torch.flatten(pooled, 1)
        return self.classifier(flattened)


# 数据增强变换
def get_train_transforms():
    """Build the augmenting preprocessing pipeline used for training.

    Resizes to 256x256, takes a random 227x227 crop, applies random flip,
    rotation, and color jitter, then converts to a tensor normalized with
    ImageNet statistics.
    """
    augmentation_steps = [
        transforms.Resize((256, 256)),
        transforms.RandomCrop((227, 227)),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomRotation(degrees=10),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
    ]
    tensor_steps = [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
    return transforms.Compose(augmentation_steps + tensor_steps)


def get_val_transforms():
    """Build the deterministic preprocessing pipeline for validation/eval.

    Resizes straight to 227x227 (no augmentation) and normalizes with the
    same ImageNet statistics as the training pipeline.
    """
    eval_steps = [
        transforms.Resize((227, 227)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
    return transforms.Compose(eval_steps)


# 焦点损失函数 - 针对难例样本
class FocalLoss(nn.Module):
    """Multi-class focal loss (Lin et al., 2017) computed on raw logits.

    Down-weights well-classified examples by ``(1 - p_t) ** gamma`` so the
    optimization focuses on hard examples — useful alongside class
    imbalance handling.

    Args:
        alpha: scalar weight applied uniformly to the loss.
        gamma: focusing parameter; ``gamma=0`` recovers alpha-scaled
            cross-entropy.
        reduction: 'mean', 'sum', or anything else to return the
            per-sample loss vector.
    """

    def __init__(self, alpha=1, gamma=2, reduction='mean'):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, inputs, targets):
        # Per-sample cross-entropy. Functional form avoids constructing a
        # new CrossEntropyLoss module on every forward call (the original
        # also misnamed this "BCE_loss" — it is categorical CE, not BCE).
        ce_loss = nn.functional.cross_entropy(inputs, targets, reduction='none')
        # pt is the model's softmax probability of the true class.
        pt = torch.exp(-ce_loss)
        focal_loss = self.alpha * (1 - pt) ** self.gamma * ce_loss

        if self.reduction == 'mean':
            return torch.mean(focal_loss)
        elif self.reduction == 'sum':
            return torch.sum(focal_loss)
        else:
            return focal_loss


def train_improved_model():
    """Train ImprovedAlexNet on the natural_images folder dataset.

    Handles class imbalance with inverse-frequency weighted sampling and
    focal loss, tracks overall and cat/dog validation accuracy per epoch,
    checkpoints the best model to 'alexnet_model.pth', and writes training
    curves to 'improved_training_curves.png'.
    """
    # Dataset root: expects one sub-directory per class (see NaturalImagesDataset).
    data_dir = "../natural_images"

    # Build transforms: augmentation for training, deterministic resize for eval.
    train_transform = get_train_transforms()
    val_transform = get_val_transforms()

    full_dataset = NaturalImagesDataset(data_dir, transform=val_transform)

    # Inspect the class distribution; counts drive the sampling weights below.
    class_counts = Counter(full_dataset.labels)
    print("类别分布:", class_counts)

    # Inverse-frequency weight per class to counter class imbalance.
    class_weights = {cls: 1.0 / count for cls, count in class_counts.items()}
    sample_weights = [class_weights[label] for label in full_dataset.labels]

    # Stratified 80/20 train/val split over sample indices.
    train_idx, val_idx = train_test_split(
        list(range(len(full_dataset))),
        test_size=0.2,
        random_state=42,
        stratify=full_dataset.labels
    )

    # Re-scan the dataset with the augmenting transform for training.
    # NOTE(review): this assumes the fresh scan yields the same index order
    # as full_dataset above (weights/splits are index-aligned) — confirm
    # the dataset's directory listing order is stable.
    train_dataset = NaturalImagesDataset(data_dir, transform=train_transform)
    train_dataset = torch.utils.data.Subset(train_dataset, train_idx)

    val_dataset = NaturalImagesDataset(data_dir, transform=val_transform)
    val_dataset = torch.utils.data.Subset(val_dataset, val_idx)

    # Weighted sampler (draws with replacement) so each epoch sees a
    # roughly class-balanced stream of training examples.
    train_weights = [sample_weights[i] for i in train_idx]
    train_sampler = WeightedRandomSampler(train_weights, len(train_weights))

    train_loader = DataLoader(train_dataset, batch_size=32, sampler=train_sampler)
    val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)

    # Model / device setup; class count comes from the scanned folders.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = ImprovedAlexNet(num_classes=len(full_dataset.class_to_idx), dropout_rate=0.5)
    model = model.to(device)

    # Focal loss emphasizes hard examples (gamma=2 is the common default).
    criterion = FocalLoss(alpha=1, gamma=2)
    # criterion = nn.CrossEntropyLoss(label_smoothing=0.1)  # alternative: label smoothing

    optimizer = optim.AdamW(model.parameters(), lr=0.001, weight_decay=1e-4)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)

    # Training loop bookkeeping.
    num_epochs = 15
    train_losses = []
    val_accuracies = []
    best_accuracy = 0

    # Separately track accuracy on the cat/dog subset (hard pair).
    cat_dog_accuracies = []

    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0

        for images, labels, _ in train_loader:
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        # Validation pass (no grad, eval mode disables dropout).
        model.eval()
        correct = 0
        total = 0
        cat_dog_correct = 0
        cat_dog_total = 0

        # Class indices for 'cat' and 'dog'; -1 if either folder is absent.
        cat_idx = full_dataset.class_to_idx.get('cat', -1)
        dog_idx = full_dataset.class_to_idx.get('dog', -1)

        with torch.no_grad():
            for images, labels, _ in val_loader:
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

                # Accumulate accuracy restricted to cat/dog samples.
                if cat_idx != -1 and dog_idx != -1:
                    cat_dog_mask = (labels == cat_idx) | (labels == dog_idx)
                    if cat_dog_mask.any():
                        cat_dog_correct += ((predicted == labels) & cat_dog_mask).sum().item()
                        cat_dog_total += cat_dog_mask.sum().item()

        accuracy = 100 * correct / total
        cat_dog_accuracy = 100 * cat_dog_correct / cat_dog_total if cat_dog_total > 0 else 0

        train_losses.append(running_loss / len(train_loader))
        val_accuracies.append(accuracy)
        cat_dog_accuracies.append(cat_dog_accuracy)

        print(f'Epoch [{epoch + 1}/{num_epochs}]')
        print(f'Loss: {running_loss / len(train_loader):.4f}')
        print(f'总体准确率: {accuracy:.2f}%')
        print(f'猫狗分类准确率: {cat_dog_accuracy:.2f}%')
        print('-' * 50)

        # Checkpoint the best model (by overall validation accuracy) along
        # with the label mappings needed to interpret its outputs later.
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            torch.save({
                'model_state_dict': model.state_dict(),
                'class_to_idx': full_dataset.class_to_idx,
                'idx_to_class': full_dataset.idx_to_class,
                'best_accuracy': best_accuracy
            }, 'alexnet_model.pth')

        scheduler.step()

    print(f"训练完成！最佳准确率: {best_accuracy:.2f}%")

    # Plot loss / accuracy / cat-dog accuracy curves side by side.
    plt.figure(figsize=(15, 5))

    plt.subplot(1, 3, 1)
    plt.plot(train_losses)
    plt.title('Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')

    plt.subplot(1, 3, 2)
    plt.plot(val_accuracies)
    plt.title('Validation Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')

    plt.subplot(1, 3, 3)
    plt.plot(cat_dog_accuracies)
    plt.title('Cat-Dog Classification Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')

    plt.tight_layout()
    plt.savefig('improved_training_curves.png')
    plt.show()


if __name__ == "__main__":
    # Script entry point: run the full training pipeline.
    train_improved_model()