import os
import random
import time
import copy
from collections import Counter
import numpy as np
import torch
torch.cuda.empty_cache()  # 清空GPU缓存
from torch import nn, optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torchvision.models import resnet50, ResNet50_Weights
from tqdm import tqdm
# 导入混合精度训练相关组件
from torch.cuda.amp import GradScaler, autocast
from torch.optim.lr_scheduler import OneCycleLR
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler

class CatBreedClassifier:
    """Transfer-learning cat-breed classifier built on a pretrained ResNet50.

    Most backbone layers are frozen; a new dropout + linear head is trained
    with mixed precision (AMP, CUDA only) under a per-batch OneCycleLR
    schedule. Directories are expected in torchvision ImageFolder layout
    (one subdirectory per breed).
    """

    def __init__(self, train_dir, test_dir, batch_size=128, lr=0.001, momentum=0.9, num_epochs=50):
        """Initialize the cat-breed classifier.

        Args:
            train_dir: training image directory, one subfolder per breed.
            test_dir: test image directory, one subfolder per breed.
            batch_size: images per training batch (default 128).
            lr: peak learning rate for the OneCycleLR schedule (default 0.001).
            momentum: retained for backward compatibility; NOT used by the
                AdamW optimizer configured in _initialize_model.
            num_epochs: epoch count the LR schedule is sized for (default 50).
        """
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.train_dir = train_dir
        self.test_dir = test_dir
        self.batch_size = batch_size
        self.lr = lr
        self.momentum = momentum  # kept for API compatibility; unused with AdamW
        self.num_epochs = num_epochs
        self._set_random_seed(2025)
        # Only enable gradient scaling on CUDA; an enabled GradScaler on CPU
        # emits warnings and does nothing useful.
        self.scaler = GradScaler(enabled=(self.device == "cuda"))
        self._prepare_data()
        self._initialize_model()

    def _set_random_seed(self, seed):
        """Seed numpy, torch (CPU and all GPUs) and random for reproducibility."""
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)  # safe no-op when CUDA is unavailable
        random.seed(seed)
        # Force deterministic cuDNN kernels and disable auto-tuning so that
        # repeated runs with the same seed produce identical results.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    def _prepare_data(self):
        """Build datasets, dataloaders and the class-name mappings."""
        # Training-time augmentation pipeline (ImageNet normalization stats).
        train_transform = transforms.Compose([
            transforms.RandomResizedCrop(224),      # random crop to 224x224
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(15),
            transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        # Standard ImageNet evaluation pipeline: resize then center-crop to the
        # same 224x224 resolution used in training. (The previous 250x250
        # resize evaluated at a different resolution than the model trained on.)
        test_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        self.train_data = ImageFolder(self.train_dir, train_transform)
        self.test_data = ImageFolder(self.test_dir, test_transform)

        # os.cpu_count() may return None; fall back to a single worker then.
        num_workers = min(os.cpu_count() or 1, 8)
        # Pinning host memory only speeds up host-to-CUDA transfers.
        pin = self.device == "cuda"

        self.train_loader = DataLoader(
            self.train_data,
            batch_size=self.batch_size,
            shuffle=True,               # reshuffle every epoch
            num_workers=num_workers,    # multi-process loading
            pin_memory=pin,
            prefetch_factor=2           # batches prefetched per worker
        )

        self.test_loader = DataLoader(
            self.test_data,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=pin
        )

        self.dataloaders = {"train": self.train_loader, "test": self.test_loader}
        self.dataset_sizes = {"train": len(self.train_data), "test": len(self.test_data)}
        # Use the dataset's own class list: os.listdir() would also pick up
        # stray non-directory entries and is not guaranteed to agree with the
        # label indices ImageFolder assigns.
        self.classes = self.train_data.classes
        self.index_to_class = {v: k for k, v in self.train_data.class_to_idx.items()}

    def _initialize_model(self):
        """Load pretrained ResNet50, freeze the backbone, attach a new head."""
        self.model = resnet50(weights=ResNet50_Weights.DEFAULT)

        # Freeze all but the last few parameter tensors; the replacement head
        # created below is trainable by default.
        for param in list(self.model.parameters())[:-3]:
            param.requires_grad = False

        # Replace the final fully-connected layer with dropout + linear head
        # sized for the number of breeds.
        num_ftrs = self.model.fc.in_features
        self.model.fc = nn.Sequential(
            nn.Dropout(0.5),                        # regularization against overfitting
            nn.Linear(num_ftrs, len(self.classes))  # output layer
        )
        self.model = self.model.to(self.device)

        self.criterion = nn.CrossEntropyLoss()

        # Hand only trainable parameters to the optimizer; frozen ones would
        # otherwise still incur per-step bookkeeping overhead.
        self.optimizer = optim.AdamW(
            (p for p in self.model.parameters() if p.requires_grad),
            lr=self.lr,
            weight_decay=0.01  # decoupled L2 regularization
        )

        # OneCycleLR is stepped once per training batch, so size it to the
        # full run (self.num_epochs epochs).
        steps_per_epoch = len(self.train_loader)
        total_steps = steps_per_epoch * self.num_epochs

        self.scheduler = OneCycleLR(
            self.optimizer,
            max_lr=self.lr,
            total_steps=total_steps,
            pct_start=0.3,            # 30% warm-up, then cosine anneal
            anneal_strategy='cos'
        )

    def train(self, num_epochs=None):
        """Train the model, keeping the weights with the best test accuracy.

        Args:
            num_epochs: epochs to run. Defaults to self.num_epochs so the batch
                count matches the OneCycleLR schedule built in
                _initialize_model; running more epochs than the schedule was
                sized for would make scheduler.step() raise.
        """
        if num_epochs is None:
            num_epochs = self.num_epochs

        since = time.time()
        best_model_wts = copy.deepcopy(self.model.state_dict())
        best_acc = 0.0
        use_amp = self.device == "cuda"  # autocast is a no-op / warns on CPU

        for epoch in range(num_epochs):
            print(f'Epoch {epoch}/{num_epochs - 1}')
            print('-' * 10)

            # Each epoch runs a training pass followed by a test pass.
            for phase in ['train', 'test']:
                if phase == 'train':
                    self.model.train()
                else:
                    self.model.eval()

                running_loss = 0.0
                running_corrects = 0

                for inputs, labels in tqdm(self.dataloaders[phase]):
                    inputs = inputs.to(self.device)
                    labels = labels.to(self.device)

                    self.optimizer.zero_grad()

                    # Gradients are only tracked during the training phase.
                    with torch.set_grad_enabled(phase == 'train'):
                        with autocast(enabled=use_amp):  # mixed precision (CUDA only)
                            outputs = self.model(inputs)
                            loss = self.criterion(outputs, labels)

                        if phase == 'train':
                            # Scaled backward pass for AMP numerical stability.
                            self.scaler.scale(loss).backward()
                            self.scaler.step(self.optimizer)
                            self.scaler.update()
                            self.scheduler.step()  # per-batch LR update

                    # Accumulate loss/accuracy statistics.
                    _, preds = torch.max(outputs, 1)
                    running_loss += loss.item() * inputs.size(0)
                    running_corrects += torch.sum(preds == labels.data)

                epoch_loss = running_loss / self.dataset_sizes[phase]
                epoch_acc = running_corrects.double() / self.dataset_sizes[phase]

                print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')

                # Track the best-performing weights on the test split.
                if phase == 'test' and epoch_acc > best_acc:
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(self.model.state_dict())

            print()

        time_elapsed = time.time() - since
        print(f'训练完成，用时 {time_elapsed // 60:.0f}分 {time_elapsed % 60:.0f}秒')
        print(f'最佳测试准确率: {best_acc:4f}')

        # Restore the best weights before saving.
        self.model.load_state_dict(best_model_wts)

        # Ensure the target directory exists; torch.save does not create it.
        os.makedirs("app", exist_ok=True)
        torch.save(self.model, "app/model.pt")
        print("最佳模型已保存为 'app/model.pt'")

    def evaluate(self):
        """Print per-class accuracy on the test split.

        Runs in eval mode without gradient tracking and reports accuracy for
        every class (or a notice for classes absent from the test set).
        """
        # evaluate() may be called standalone: make dropout/batch-norm behave
        # deterministically regardless of the model's previous mode.
        self.model.eval()
        pred = []
        target = []

        with torch.no_grad():
            for inputs, labels in tqdm(self.dataloaders["test"], desc="test", position=0, leave=True):
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)

                target += labels.cpu().numpy().tolist()

                outputs = self.model(inputs)
                _, preds = torch.max(outputs, 1)
                pred += preds.cpu().numpy().tolist()

        correct_counts = Counter()
        total_counts = Counter(target)

        for t, p in zip(target, pred):
            if t == p:
                correct_counts[t] += 1

        # Per-class accuracy; None marks classes with no test samples.
        accuracy = {}
        for c in range(len(self.classes)):
            total = total_counts[c]
            accuracy[c] = correct_counts[c] / total if total > 0 else None

        for c in range(len(self.classes)):
            class_name = self.classes[c]
            acc = accuracy[c]
            if acc is not None:
                print(f"Class {class_name} (ID: {c}) - Accuracy: {acc:.2%}")
            else:
                print(f"Class {class_name} (ID: {c}) - No samples in test set")
                
                

def _main():
    """Script entry point: train on the local dataset, then report accuracy."""
    train_root = r"E:\FelixAI\bigModel\35-cat-breed-dataset\train"
    test_root = r"E:\FelixAI\bigModel\35-cat-breed-dataset\test"

    breed_model = CatBreedClassifier(train_root, test_root)
    breed_model.train(num_epochs=50)
    breed_model.evaluate()


if __name__ == "__main__":
    _main()