import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms, models, datasets
from torch.utils.data import DataLoader
import os
from PIL import Image
import matplotlib.pyplot as plt

# Data preprocessing.
# Channel statistics of the ImageNet training set — must match the
# normalization the pretrained MobileNetV2 backbone was trained with.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

# 'train' applies random augmentation; 'val' is deterministic
# (resize + center crop) so evaluation is reproducible.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(_IMAGENET_MEAN, _IMAGENET_STD),
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(_IMAGENET_MEAN, _IMAGENET_STD),
    ]),
}

# Custom dataset loader.
class CustomImageFolder(datasets.ImageFolder):
    """ImageFolder that drops unreadable/corrupt image files at construction.

    The base class indexes every file under ``root``; this subclass then
    verifies each one with PIL and removes the broken entries so training
    never crashes mid-epoch on a corrupt file.
    """

    def __init__(self, root, transform=None, target_transform=None, loader=datasets.folder.default_loader):
        super().__init__(root, transform=transform, target_transform=target_transform, loader=loader)
        self._check_and_remove_broken_images()

    def _check_and_remove_broken_images(self):
        """Verify every indexed image and prune the ones that fail.

        Rebuilds ``samples``, ``imgs`` and ``targets`` together: the base
        DatasetFolder derives ``targets`` from ``samples`` at init time, so
        deleting from ``samples`` alone would leave ``targets`` stale and
        misaligned with the remaining images.
        """
        valid_samples = []
        for path, target in self.samples:
            try:
                with Image.open(path) as img:
                    img.verify()  # raises if the file is corrupt
                valid_samples.append((path, target))
            # PIL raises OSError for unreadable files and can raise
            # SyntaxError for truncated images; IOError is just an
            # alias of OSError, so listing it separately was redundant.
            except (OSError, SyntaxError):
                print(f"Removing broken image: {path}")

        self.samples = valid_samples
        self.imgs = valid_samples  # ImageFolder keeps this alias of samples
        self.targets = [target for _, target in valid_samples]

# Load the datasets.
def load_datasets(data_dir):
    """Build train/val datasets and data loaders rooted at ``data_dir``.

    Expects ``data_dir/train`` and ``data_dir/val`` directories laid out in
    ImageFolder format (one subdirectory per class).

    Args:
        data_dir: root directory containing the 'train' and 'val' splits.

    Returns:
        Tuple ``(image_datasets, dataloaders)`` — dicts keyed by split name.
    """
    image_datasets = {
        split: CustomImageFolder(
            os.path.join(data_dir, split),
            transform=data_transforms[split],
        )
        for split in ('train', 'val')
    }
    dataloaders = {
        split: DataLoader(
            image_datasets[split],
            batch_size=32,
            # Shuffle only the training split; a fixed validation order
            # keeps evaluation reproducible and avoids pointless work.
            shuffle=(split == 'train'),
            num_workers=4,  # multi-process data loading
        )
        for split in ('train', 'val')
    }
    return image_datasets, dataloaders

# Define the model.
def initialize_model(num_classes=4):
    """Build a MobileNetV2 feature extractor with a fresh classification head.

    Loads ImageNet-pretrained weights, freezes the whole backbone, and
    swaps the final linear layer for a new ``num_classes``-way head; the
    replacement layer is the only part left trainable.

    Args:
        num_classes: number of output classes for the new head.

    Returns:
        The modified MobileNetV2 module.
    """
    net = models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1)

    # Freeze every pretrained weight; only the head we add below trains.
    for weight in net.parameters():
        weight.requires_grad = False

    # Replace the final classifier layer with a freshly initialized one.
    head_in = net.classifier[1].in_features
    net.classifier[1] = nn.Linear(head_in, num_classes)
    return net

# Training function.
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25):
    """Run a standard train/eval loop and return the trained model.

    Per epoch: one pass over the 'train' split with gradients enabled,
    then one pass over 'val' with gradients disabled; prints loss and
    accuracy for each phase.

    Args:
        model: network to train (moved to GPU when one is available).
        dataloaders: dict with 'train' and 'val' DataLoaders.
        criterion: loss function, e.g. ``nn.CrossEntropyLoss()``.
        optimizer: optimizer over the model's trainable parameters.
        num_epochs: number of full passes over the data.

    Returns:
        The trained model (same instance, on the selected device).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # use GPU if present
    model = model.to(device)

    for epoch in range(num_epochs):
        print(f'Epoch {epoch}/{num_epochs-1}')
        print('-' * 10)

        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0

            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                if phase == 'train':
                    # Only clear gradients when we are actually going to
                    # backprop; the original zeroed them during validation
                    # too, which is wasted work.
                    optimizer.zero_grad()

                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                running_loss += loss.item() * inputs.size(0)
                # Accumulate as a Python int instead of a device tensor to
                # avoid keeping a tensor alive / syncing on .double() later.
                running_corrects += torch.sum(preds == labels.data).item()

            num_samples = len(dataloaders[phase].dataset)
            epoch_loss = running_loss / num_samples
            epoch_acc = running_corrects / num_samples

            print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')

    return model

if __name__ == '__main__':
    # Dataset root: expects data/train and data/val subdirectories
    # in ImageFolder layout (one folder per class).
    data_dir = 'data'

    # Load the data.
    image_datasets, dataloaders = load_datasets(data_dir)

    # Initialize the model (frozen backbone, fresh 4-way head).
    model = initialize_model(num_classes=4)

    # Loss and optimizer. Only hand Adam the trainable (unfrozen)
    # parameters — the backbone is frozen in initialize_model, so
    # including it would just allocate useless optimizer state.
    criterion = nn.CrossEntropyLoss()
    trainable_params = [p for p in model.parameters() if p.requires_grad]
    optimizer = optim.Adam(trainable_params, lr=0.001)

    # Train the model.
    model = train_model(model, dataloaders, criterion, optimizer, num_epochs=10)

    # Save the trained weights.
    torch.save(model.state_dict(), 'garbage_classification.pth')
    print("Model saved as garbage_classification.pth")