import paddle
import paddle.nn as nn
import paddle.vision.transforms as T
from paddle.io import Dataset
import os
from PIL import Image
from tnt import tnt_s

# 自定义数据集类
# Image-folder dataset: expects data_dir/<class_name>/<image files> layout.
class CustomDataset(Dataset):
    """Dataset where each sub-directory of ``data_dir`` is one class.

    Labels are assigned from the *sorted* class-directory names, so the
    class -> index mapping is deterministic across runs and filesystems.

    Args:
        data_dir: root directory containing one sub-directory per class.
        transform: optional callable applied to each PIL image.
    """

    def __init__(self, data_dir, transform=None):
        self.data_dir = data_dir
        self.transform = transform
        # Only directories count as classes: a stray file (e.g. .DS_Store)
        # would otherwise be listed as a class and later crash the
        # os.listdir() call below with NotADirectoryError.
        self.classes = sorted(
            d for d in os.listdir(data_dir)
            if os.path.isdir(os.path.join(data_dir, d))
        )
        self.class_to_idx = {cls_name: i for i, cls_name in enumerate(self.classes)}
        self.samples = []
        for cls_name in self.classes:
            cls_dir = os.path.join(data_dir, cls_name)
            # Sorted so sample order is reproducible across filesystems.
            for img_name in sorted(os.listdir(cls_dir)):
                img_path = os.path.join(cls_dir, img_name)
                # Skip nested directories / non-regular files.
                if not os.path.isfile(img_path):
                    continue
                self.samples.append((img_path, self.class_to_idx[cls_name]))

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        img_path, label = self.samples[idx]
        # Force RGB so grayscale/RGBA inputs yield a consistent 3 channels.
        image = Image.open(img_path).convert('RGB')
        if self.transform:
            image = self.transform(image)
        return image, paddle.to_tensor(label, dtype='int64')

# Load the model; size the classification head from the number of class
# sub-directories in the training set (counting only directories, so a
# stray file cannot inflate the class count).
_train_root = './dataset/train'
num_classes = len([d for d in os.listdir(_train_root)
                   if os.path.isdir(os.path.join(_train_root, d))])
model = tnt_s(pretrained=True, class_dim=num_classes)

# Wrap with the high-level (hapi) API for prepare/fit/evaluate.
model = paddle.Model(model)

# Optimizer
opt = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())

# Loss function
loss = nn.CrossEntropyLoss()

# Metric: top-1 and top-5 accuracy.
# NOTE(review): top-5 requires num_classes >= 5 — confirm for this dataset.
metric = paddle.metric.Accuracy(topk=(1, 5))

# Bind optimizer, loss, and metrics to the model.
model.prepare(optimizer=opt, loss=loss, metrics=metric)

# Shared preprocessing: bicubic resize to 248, then a 224 crop, then
# per-channel normalization to roughly [-1, 1].
_resize = T.Resize(248, interpolation='bicubic')
_normalize = T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

# Training pipeline augments with a random crop.
# NOTE: ToTensor only converts the PIL image to a CPU tensor; device
# placement is handled by the training loop, not the transform.
train_transforms = T.Compose([_resize, T.RandomCrop(224), T.ToTensor(), _normalize])
# Validation pipeline uses a deterministic center crop instead.
val_transforms = T.Compose([_resize, T.CenterCrop(224), T.ToTensor(), _normalize])

# Build the train/validation datasets from their folder roots.
train_data_dir = './dataset/train'
val_data_dir = './dataset/test'
train_dataset = CustomDataset(data_dir=train_data_dir, transform=train_transforms)
val_dataset = CustomDataset(data_dir=val_data_dir, transform=val_transforms)

# Fine-tune the model; checkpoints land in save_models/ every epoch.
fit_config = dict(
    train_data=train_dataset,
    eval_data=val_dataset,
    batch_size=64,
    epochs=20,
    eval_freq=1,       # evaluate every epoch
    log_freq=1,        # log every step
    save_dir='save_models',
    save_freq=1,       # checkpoint every epoch
    verbose=1,
    drop_last=False,   # keep the final partial batch
    shuffle=True,
    num_workers=4,     # parallel data-loading workers
)
model.fit(**fit_config)

