import os
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, datasets
import sys
from PIL import Image
import time
import random
from tqdm import tqdm

# Add the project root directory to sys.path so `models.*` imports resolve
# when this script is run directly from its subdirectory.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from models.image_classifier import ImageClassifier

class ImageClassificationDataset(Dataset):
    """Binary image-classification dataset: faces (label 0) vs. flowers (label 1).

    Recursively walks two directory trees, collects image file paths, and
    serves (image, label) pairs; images are decoded lazily in __getitem__.
    """

    # Accepted image extensions; matching is case-insensitive so files such
    # as IMG_0001.JPG are not silently skipped.
    _EXTENSIONS = ('.jpg', '.jpeg', '.png')

    def __init__(self, face_dir, flower_dir, transform=None, max_images_per_class=None):
        """
        Initialize the dataset.

        Args:
            face_dir: directory tree containing face images (label 0).
            flower_dir: directory tree containing flower images (label 1).
            transform: optional transform applied to each PIL image in
                __getitem__ (e.g. a torchvision transform pipeline).
            max_images_per_class: if set, keep a random sample of at most this
                many images per class; None keeps every image found.
        """
        self.transform = transform

        # Parallel lists: self.images[i] is the path, self.labels[i] the class id.
        self.images = []
        self.labels = []

        face_paths = self._collect_image_paths(face_dir, max_images_per_class)
        flower_paths = self._collect_image_paths(flower_dir, max_images_per_class)

        # Faces first (label 0), then flowers (label 1).
        for path in face_paths:
            self.images.append(path)
            self.labels.append(0)
        for path in flower_paths:
            self.images.append(path)
            self.labels.append(1)

        print(f"加载了 {len(face_paths)} 张人脸图像和 {len(flower_paths)} 张花卉图像")

    @classmethod
    def _collect_image_paths(cls, directory, max_images=None):
        """Recursively gather image paths under `directory`.

        Skips pre-generated grayscale copies (files ending in '_gray.jpg').
        If more than `max_images` files are found, a random sample of that
        size is returned.
        """
        paths = []
        for root, _, files in os.walk(directory):
            for file in files:
                name = file.lower()  # case-insensitive extension match
                if name.endswith(cls._EXTENSIONS) and not name.endswith('_gray.jpg'):
                    paths.append(os.path.join(root, file))

        if max_images and len(paths) > max_images:
            paths = random.sample(paths, max_images)
        return paths

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        image_path = self.images[idx]
        label = self.labels[idx]

        # Force RGB so grayscale/RGBA files still yield 3-channel tensors.
        image = Image.open(image_path).convert('RGB')

        if self.transform:
            image = self.transform(image)

        return image, label

def create_data_loaders(args):
    """Build the train/val DataLoaders and the ordered class-name list.

    Args:
        args: parsed CLI namespace providing face_train/face_val,
            flower_train/flower_val, batch_size and num_workers.

    Returns:
        (train_loader, val_loader, class_names) where class index 0 is
        'face' and index 1 is 'flower'.
    """
    # ImageNet normalization statistics (standard for pretrained backbones).
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    # BUG FIX: Resize(224) with a plain int scales only the SHORTER side and
    # keeps the aspect ratio, so differently-proportioned images produce
    # differently-shaped tensors and default batch collation fails.
    # Resize((224, 224)) forces a fixed square output, which is what the
    # original "直接调整为目标大小" comment intended.
    train_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),  # augmentation: train set only
        transforms.ToTensor(),
        normalize,
    ])

    val_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        normalize,
    ])

    # Use every available image per class (no cap).
    max_images_per_class = None

    train_dataset = ImageClassificationDataset(
        args.face_train,
        args.flower_train,
        transform=train_transform,
        max_images_per_class=max_images_per_class
    )

    val_dataset = ImageClassificationDataset(
        args.face_val,
        args.flower_val,
        transform=val_transform,
        max_images_per_class=max_images_per_class
    )

    train_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,  # reshuffle each epoch
        num_workers=args.num_workers
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,  # deterministic evaluation order
        num_workers=args.num_workers
    )

    # Index order must match the dataset labels: 0 -> face, 1 -> flower.
    class_names = ['face', 'flower']

    return train_loader, val_loader, class_names

def train(args):
    """Train the binary face/flower image classifier.

    Runs `args.epochs` epochs of training with Adam + cross-entropy,
    evaluates on the validation set after each epoch, and saves the
    weights with the best validation accuracy to
    `args.output_dir/best_classifier.pth`.

    Args:
        args: parsed CLI namespace (data dirs, batch_size, epochs, lr,
            output_dir, num_workers, no_cuda).
    """
    # Create the output directory for checkpoints.
    os.makedirs(args.output_dir, exist_ok=True)

    # Select the compute device.
    device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
    print(f"使用设备: {device}")

    # Build data loaders.
    train_loader, val_loader, class_names = create_data_loaders(args)
    print(f"训练集大小: {len(train_loader.dataset)}, 验证集大小: {len(val_loader.dataset)}")

    # Build the classifier.
    classifier = ImageClassifier(num_classes=len(class_names))
    classifier = classifier.to(device)

    # Loss and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(classifier.parameters(), lr=args.lr)

    best_val_acc = 0.0
    for epoch in range(args.epochs):
        epoch_start = time.time()

        # ---- training phase ----
        classifier.train()
        running_loss, correct, total = 0.0, 0, 0
        for images, labels in tqdm(train_loader, desc=f"Epoch {epoch + 1}/{args.epochs}"):
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = classifier(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Accumulate loss weighted by batch size for a true mean.
            running_loss += loss.item() * labels.size(0)
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)

        train_loss = running_loss / max(total, 1)
        train_acc = correct / max(total, 1)

        # ---- validation phase ----
        val_loss, val_acc = _evaluate(classifier, val_loader, criterion, device)

        elapsed = time.time() - epoch_start
        print(f"Epoch {epoch + 1}/{args.epochs} ({elapsed:.1f}s) - "
              f"train_loss: {train_loss:.4f}, train_acc: {train_acc:.4f}, "
              f"val_loss: {val_loss:.4f}, val_acc: {val_acc:.4f}")

        # Keep the best-performing weights.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            checkpoint_path = os.path.join(args.output_dir, 'best_classifier.pth')
            torch.save(classifier.state_dict(), checkpoint_path)
            print(f"保存最佳模型到 {checkpoint_path} (val_acc={val_acc:.4f})")

    print("分类器训练完成!")


def _evaluate(model, loader, criterion, device):
    """Evaluate `model` on `loader`; return (mean_loss, accuracy)."""
    model.eval()
    running_loss, correct, total = 0.0, 0, 0
    with torch.no_grad():  # no gradients needed for evaluation
        for images, labels in loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)
            running_loss += loss.item() * labels.size(0)
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
    # Guard against an empty loader.
    return running_loss / max(total, 1), correct / max(total, 1)

def main():
    """CLI entry point: parse arguments and launch training."""
    parser = argparse.ArgumentParser(description='图像分类器训练')

    # Declarative flag table: (flag, add_argument kwargs), in display order.
    cli_options = [
        # Data directories
        ('--face_train', dict(type=str, required=True, help='人脸训练数据目录')),
        ('--face_val', dict(type=str, required=True, help='人脸验证数据目录')),
        ('--flower_train', dict(type=str, required=True, help='花卉训练数据目录')),
        ('--flower_val', dict(type=str, required=True, help='花卉验证数据目录')),
        # Training hyperparameters
        ('--batch_size', dict(type=int, default=32, help='批次大小')),
        ('--epochs', dict(type=int, default=10, help='训练轮次')),
        ('--lr', dict(type=float, default=0.001, help='学习率')),
        ('--output_dir', dict(type=str, default='models/checkpoints', help='输出目录')),
        ('--num_workers', dict(type=int, default=4, help='数据加载线程数')),
        ('--no_cuda', dict(action='store_true', help='不使用CUDA')),
    ]
    for flag, kwargs in cli_options:
        parser.add_argument(flag, **kwargs)

    train(parser.parse_args())

# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main() 