#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
ResNet 分类器模型
"""

import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torchvision import models, transforms
from PIL import Image
import numpy as np

class ImageDataset(Dataset):
    """Map-style dataset yielding (image_tensor, label) pairs loaded from disk."""

    def __init__(self, image_paths, labels, transform=None):
        """
        Build the dataset.

        Args:
            image_paths (list): Paths of the image files on disk.
            labels (list): Label for each path, in the same order.
            transform (torchvision.transforms, optional): Applied to each
                loaded PIL image before it is returned.
        """
        self.image_paths = image_paths
        self.labels = labels
        self.transform = transform

    def __len__(self):
        """Number of samples (one per image path)."""
        return len(self.image_paths)

    def __getitem__(self, idx):
        """Load the idx-th image as RGB, apply the transform, return (image, label)."""
        # Force RGB so grayscale/RGBA files still produce 3-channel input.
        img = Image.open(self.image_paths[idx]).convert('RGB')
        if self.transform:
            img = self.transform(img)
        return img, self.labels[idx]

class ResNetClassifier:
    """Thin training/inference wrapper around a torchvision ResNet backbone."""

    def __init__(self, num_classes, model_name='resnet18', pretrained=True, device=None):
        """
        Initialize the classifier.

        Args:
            num_classes (int): Number of target classes.
            model_name (str, optional): Backbone name: 'resnet18', 'resnet34'
                or 'resnet50'.
            pretrained (bool, optional): Whether to load ImageNet-pretrained
                weights.
            device (torch.device, optional): Device to run on; auto-detects
                CUDA when None.

        Raises:
            ValueError: If `model_name` is not one of the supported backbones.
        """
        # Remember the class count so evaluate() can size the confusion
        # matrix correctly even when the test set lacks some classes.
        self.num_classes = num_classes

        # Pick the device (CUDA if available, otherwise CPU).
        self.device = device if device else torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Build the backbone. NOTE(review): the `pretrained=` kwarg is
        # deprecated in torchvision >= 0.13 in favor of `weights=`; kept here
        # to preserve the existing public API and behavior.
        factories = {
            'resnet18': models.resnet18,
            'resnet34': models.resnet34,
            'resnet50': models.resnet50,
        }
        if model_name not in factories:
            raise ValueError(f"不支持的模型: {model_name}")
        self.model = factories[model_name](pretrained=pretrained)

        # Replace the final fully-connected layer to match num_classes.
        in_features = self.model.fc.in_features
        self.model.fc = nn.Linear(in_features, num_classes)

        # Move the model to the selected device.
        self.model = self.model.to(self.device)

        # Training-time augmentation; normalization uses ImageNet statistics
        # to match the pretrained backbone.
        self.train_transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(15),
            transforms.ColorJitter(brightness=0.1, contrast=0.1),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

        # Deterministic preprocessing for validation/test/predict.
        self.val_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

        # Training state; optimizer/scheduler are created in train().
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = None
        self.scheduler = None

    def train(self, train_paths, train_labels, val_paths, val_labels, batch_size=32,
              learning_rate=0.001, epochs=10, weight_decay=0.0001, callbacks=None,
              num_workers=4):
        """
        Train the model with Adam + cosine-annealing LR schedule.

        Args:
            train_paths (list): Training image paths.
            train_labels (list): Training labels.
            val_paths (list): Validation image paths.
            val_labels (list): Validation labels.
            batch_size (int, optional): Batch size for both loaders.
            learning_rate (float, optional): Initial Adam learning rate.
            epochs (int, optional): Number of training epochs.
            weight_decay (float, optional): Adam weight decay.
            callbacks (dict, optional): Optional hooks; supports
                'on_epoch_end'(epoch, metrics) and
                'on_batch_end'(epoch, batch_idx, metrics).
            num_workers (int, optional): DataLoader worker processes.

        Returns:
            dict: Per-epoch history with keys 'train_loss', 'train_acc',
                'val_loss', 'val_acc'.
        """
        # Build datasets/loaders (augmented transform for training only).
        train_dataset = ImageDataset(train_paths, train_labels, self.train_transform)
        val_dataset = ImageDataset(val_paths, val_labels, self.val_transform)

        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                  num_workers=num_workers)
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False,
                                num_workers=num_workers)

        # Fresh optimizer for each train() call.
        self.optimizer = optim.Adam(
            self.model.parameters(),
            lr=learning_rate,
            weight_decay=weight_decay
        )

        # Cosine annealing over the full run (T_max = epochs).
        self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer,
            T_max=epochs
        )

        history = {
            'train_loss': [],
            'train_acc': [],
            'val_loss': [],
            'val_acc': []
        }

        for epoch in range(epochs):
            # --- Training phase ---
            self.model.train()
            train_loss = 0.0
            train_correct = 0
            train_total = 0

            for batch_idx, (images, labels) in enumerate(train_loader):
                images = images.to(self.device)
                labels = labels.to(self.device)

                # Forward pass.
                self.optimizer.zero_grad()
                outputs = self.model(images)
                loss = self.criterion(outputs, labels)

                # Backward pass and parameter update.
                loss.backward()
                self.optimizer.step()

                # Accumulate running statistics.
                train_loss += loss.item()
                _, predicted = outputs.max(1)
                train_total += labels.size(0)
                batch_correct = predicted.eq(labels).sum().item()
                train_correct += batch_correct

                # Per-batch callback hook.
                if callbacks and 'on_batch_end' in callbacks:
                    callbacks['on_batch_end'](epoch, batch_idx, {
                        'loss': loss.item(),
                        'acc': 100. * batch_correct / labels.size(0)
                    })

            # --- Validation phase ---
            self.model.eval()
            val_loss = 0.0
            val_correct = 0
            val_total = 0

            with torch.no_grad():
                for images, labels in val_loader:
                    images = images.to(self.device)
                    labels = labels.to(self.device)

                    outputs = self.model(images)
                    loss = self.criterion(outputs, labels)

                    val_loss += loss.item()
                    _, predicted = outputs.max(1)
                    val_total += labels.size(0)
                    val_correct += predicted.eq(labels).sum().item()

            # Average loss per batch; accuracy as a percentage.
            train_loss = train_loss / len(train_loader)
            train_acc = 100. * train_correct / train_total
            val_loss = val_loss / len(val_loader)
            val_acc = 100. * val_correct / val_total

            history['train_loss'].append(train_loss)
            history['train_acc'].append(train_acc)
            history['val_loss'].append(val_loss)
            history['val_acc'].append(val_acc)

            # Step the LR schedule once per epoch.
            self.scheduler.step()

            # Per-epoch callback hook.
            if callbacks and 'on_epoch_end' in callbacks:
                callbacks['on_epoch_end'](epoch, {
                    'train_loss': train_loss,
                    'train_acc': train_acc,
                    'val_loss': val_loss,
                    'val_acc': val_acc
                })

        return history

    def predict(self, image_path):
        """
        Predict the class of a single image.

        Args:
            image_path (str): Path to the image file.

        Returns:
            tuple: (predicted class index, softmax confidence in [0, 1]).
        """
        self.model.eval()

        # Load and preprocess exactly like validation data.
        image = Image.open(image_path).convert('RGB')
        image = self.val_transform(image)
        image = image.unsqueeze(0).to(self.device)  # add batch dimension

        with torch.no_grad():
            outputs = self.model(image)
            probabilities = torch.nn.functional.softmax(outputs, dim=1)
            # Highest-probability class and its confidence.
            confidence, predicted = probabilities.max(1)

        return predicted.item(), confidence.item()

    def evaluate(self, test_paths, test_labels, batch_size=32, num_workers=4):
        """
        Evaluate the model on a labeled test set.

        Args:
            test_paths (list): Test image paths.
            test_labels (list): Test labels (int class indices < num_classes).
            batch_size (int, optional): Batch size.
            num_workers (int, optional): DataLoader worker processes.

        Returns:
            tuple: (accuracy %, mean loss per batch, confusion matrix of
                shape (num_classes, num_classes), rows = true class,
                columns = predicted class).
        """
        self.model.eval()

        test_dataset = ImageDataset(test_paths, test_labels, self.val_transform)
        test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                                 num_workers=num_workers)

        test_loss = 0.0
        test_correct = 0
        test_total = 0
        all_predictions = []
        all_labels = []

        with torch.no_grad():
            for images, labels in test_loader:
                images = images.to(self.device)
                labels = labels.to(self.device)

                outputs = self.model(images)
                loss = self.criterion(outputs, labels)

                test_loss += loss.item()
                _, predicted = outputs.max(1)
                test_total += labels.size(0)
                test_correct += predicted.eq(labels).sum().item()

                # Collect for the confusion matrix.
                all_predictions.extend(predicted.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

        test_loss = test_loss / len(test_loader)
        test_acc = 100. * test_correct / test_total

        # FIX: size the matrix by the model's class count rather than
        # len(set(test_labels)) — the old code raised IndexError when the
        # model predicted a class absent from the test labels, and misaligned
        # class indices when the test set lacked some classes.
        confusion = np.zeros((self.num_classes, self.num_classes), dtype=np.int64)
        for p, t in zip(all_predictions, all_labels):
            confusion[t][p] += 1

        return test_acc, test_loss, confusion

    def save(self, path):
        """
        Save the model weights to disk.

        Args:
            path (str): Destination file path.
        """
        torch.save({
            'model_state_dict': self.model.state_dict(),
        }, path)

    def load(self, path):
        """
        Load model weights from disk.

        Args:
            path (str): Checkpoint file path.

        Returns:
            bool: True on success, False on any failure (error is printed).
        """
        try:
            # map_location keeps CPU-only machines able to load GPU checkpoints.
            checkpoint = torch.load(path, map_location=self.device)
            self.model.load_state_dict(checkpoint['model_state_dict'])
            return True
        except Exception as e:
            # Deliberate best-effort contract: report and signal failure
            # rather than propagate.
            print(f"加载模型失败: {e}")
            return False

    def export_onnx(self, path, input_shape=(1, 3, 224, 224)):
        """
        Export the model to ONNX format with a dynamic batch dimension.

        Args:
            path (str): Output .onnx file path.
            input_shape (tuple, optional): Shape of the dummy tracing input
                (batch, channels, height, width).
        """
        self.model.eval()
        dummy_input = torch.randn(input_shape, device=self.device)

        torch.onnx.export(
            self.model,
            dummy_input,
            path,
            export_params=True,
            opset_version=11,
            do_constant_folding=True,
            input_names=['input'],
            output_names=['output'],
            # Allow any batch size at inference time.
            dynamic_axes={
                'input': {0: 'batch_size'},
                'output': {0: 'batch_size'}
            }
        )