import os
import numpy as np
import paddle
from paddle.io import Dataset
from paddle.vision.transforms import Compose, Resize, Normalize, Transpose, RandomHorizontalFlip
from PIL import Image
import matplotlib.pyplot as plt
from paddle.callbacks import Callback

class Garbages_Dataset(Dataset):
    """Garbage-classification dataset driven by plain-text list files.

    Args:
        data_root: dataset root directory; must contain ``labels.txt``
            with one class name per line (line index == class id).
        list_file: path to ``train_list.txt`` / ``test_list.txt``; each
            non-empty line is ``<image_path> <label_id>``.
        mode: "train" or "test" (stored for API compatibility; not used
            by this class itself).
        transform: optional callable applied to each PIL image.
    """

    def __init__(self, data_root, list_file, mode="train", transform=None):
        super(Garbages_Dataset, self).__init__()
        self.data_root = data_root
        self.list_file = list_file
        self.mode = mode
        self.transform = transform

        # Read the label file and build the class-name -> id mapping.
        self.labels = []
        with open(os.path.join(data_root, "labels.txt"), "r", encoding="utf-8") as f:
            for line in f:
                name = line.strip()
                if name:  # skip blanks so a trailing newline cannot add an empty class
                    self.labels.append(name)
        self.class_to_idx = {cls: idx for idx, cls in enumerate(self.labels)}

        # Read the sample list (train_list.txt or test_list.txt).
        self.samples = []
        with open(list_file, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue  # tolerate blank/trailing lines instead of crashing on unpack
                # rsplit once from the right so image paths containing spaces still parse
                path, label = line.rsplit(maxsplit=1)
                self.samples.append((path, int(label)))

    def __getitem__(self, index):
        """Return the ``(image, label)`` pair at ``index``."""
        img_path, label = self.samples[index]

        # Force RGB so grayscale/RGBA files yield a consistent 3 channels.
        img = Image.open(img_path).convert('RGB')

        # Apply the preprocessing/augmentation pipeline, if any.
        if self.transform:
            img = self.transform(img)

        return img, label

    def __len__(self):
        return len(self.samples)

    def get_labels(self):
        """Return the list of class names (index == class id)."""
        return self.labels

# Training-set pipeline (preprocessing + augmentation).
# NOTE: paddle's Normalize sees pixel values on the 0-255 scale here (there is
# no ToTensor/255-rescale before it), so the ImageNet mean/std must also be on
# the 0-255 scale; the original [0.485, 0.456, 0.406] values would leave the
# data effectively unnormalized.
train_transform = Compose([
    Resize(size=(224, 224)),            # resize to the model's input size
    RandomHorizontalFlip(prob=0.5),     # train-time augmentation
    Normalize(                          # ImageNet mean/std, expressed on 0-255 scale
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        data_format='HWC'
    ),
    Transpose()                         # HWC -> CHW (Paddle's expected layout)
])

# Test-set pipeline (preprocessing only, no augmentation).
# Mean/std are on the 0-255 scale for the same reason as the training
# pipeline: Normalize receives raw 0-255 pixel values here.
test_transform = Compose([
    Resize(size=(224, 224)),
    Normalize(
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        data_format='HWC'
    ),
    Transpose()
])

# Dataset location and the per-split list files.
data_root = "/home/aistudio/data/data89094/Garbages"
train_list = os.path.join(data_root, "train_list.txt")
test_list = os.path.join(data_root, "test_list.txt")

# One dataset instance per split, each with its own transform pipeline.
train_dataset = Garbages_Dataset(
    data_root=data_root, list_file=train_list,
    mode="train", transform=train_transform,
)
test_dataset = Garbages_Dataset(
    data_root=data_root, list_file=test_list,
    mode="test", transform=test_transform,
)

# Mini-batch loaders; only the training split is shuffled.
_loader_opts = dict(batch_size=32, num_workers=4)
train_loader = paddle.io.DataLoader(train_dataset, shuffle=True, **_loader_opts)
test_loader = paddle.io.DataLoader(test_dataset, shuffle=False, **_loader_opts)

# Build the backbone: ImageNet-pretrained ResNet50.
model = paddle.vision.models.resnet50(pretrained=True)

# Derive the classifier's input width from the existing fc layer instead of
# hard-coding 2048 (paddle.nn.Linear stores weight as [in_features, out_features],
# so shape[0] is the input dimension).
in_features = model.fc.weight.shape[0]

# Replace the head with one sized for our class count.
model.fc = paddle.nn.Linear(in_features, len(train_dataset.get_labels()))

# Wrap in paddle.Model to use the high-level fit/evaluate API.
model = paddle.Model(model)



# Custom callback that records training history for later plotting.
class HistoryCallback(Callback):
    """Accumulates per-epoch train/eval metrics from ``paddle.Model.fit``.

    After training, ``self.history`` holds four lists:
    'loss', 'acc' (training, one entry per epoch) and
    'val_loss', 'val_acc' (one entry per evaluation run).
    """

    def __init__(self):
        super().__init__()
        self.history = {
            'loss': [],
            'acc': [],
            'val_loss': [],
            'val_acc': []
        }

    @staticmethod
    def _scalar(value):
        # paddle reports losses/metrics as 1-element lists; unwrap to a number.
        if isinstance(value, (list, tuple)):
            return value[0] if value else None
        return value

    def on_epoch_end(self, epoch, logs=None):
        # Training logs only carry the train-split metrics here.
        logs = logs or {}
        self.history['loss'].append(self._scalar(logs.get('loss')))
        self.history['acc'].append(self._scalar(logs.get('acc')))

    def on_eval_end(self, logs=None):
        # Validation metrics arrive via the eval hook, not in on_epoch_end;
        # the original appended logs.get('val_loss') there and only got None.
        logs = logs or {}
        self.history['val_loss'].append(self._scalar(logs.get('loss')))
        self.history['val_acc'].append(self._scalar(logs.get('acc')))

# Visualization helper for the recorded training curves.
def plot_training_metrics(history, save_dir="output"):
    """Plot loss and accuracy curves collected during training.

    Args:
        history: dict with keys 'loss', 'acc', 'val_loss', 'val_acc';
            each maps to a list of per-epoch values. Validation lists may
            be shorter than the training lists or contain ``None`` entries
            (e.g. when evaluation did not run every epoch) — such entries
            are skipped rather than crashing matplotlib.
        save_dir: directory the figure is written to (created if absent).
    """
    os.makedirs(save_dir, exist_ok=True)

    epochs = len(history['loss'])
    if epochs == 0:
        # Nothing was recorded; an empty figure would be misleading.
        print("No training history to plot; skipping visualization.")
        return

    x = np.arange(1, epochs + 1)

    def _plot_series(series, fmt, label):
        # Pair each epoch with its value, dropping missing (None) entries;
        # zip also truncates safely when the series is shorter than x.
        pts = [(e, v) for e, v in zip(x, series or []) if v is not None]
        if pts:
            xs, ys = zip(*pts)
            plt.plot(xs, ys, fmt, label=label)

    plt.figure(figsize=(12, 5))

    # Loss curves (train vs. validation).
    plt.subplot(1, 2, 1)
    _plot_series(history['loss'], 'b-o', '训练损失')
    _plot_series(history.get('val_loss'), 'r-o', '验证损失')
    plt.title('损失曲线')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.grid(True)
    plt.legend()

    # Accuracy curves (train vs. validation).
    plt.subplot(1, 2, 2)
    _plot_series(history['acc'], 'b-o', '训练准确率')
    _plot_series(history.get('val_acc'), 'r-o', '验证准确率')
    plt.title('准确率曲线')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.grid(True)
    plt.legend()

    # Lay out the subplots and write the figure to disk.
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'training_metrics.png'), dpi=300, bbox_inches='tight')
    plt.close()

    print(f"训练指标可视化已保存至: {os.path.join(save_dir, 'training_metrics.png')}")

# Train the model, then visualize and evaluate the run.
def visualize_training_results(model, train_loader, val_loader, epochs=2, save_dir="output"):
    """Train ``model`` with the high-level API, plot its curves, and evaluate.

    Args:
        model: a ``paddle.Model`` instance (already wrapped).
        train_loader: DataLoader for the training split.
        val_loader: DataLoader used for per-epoch validation and the
            final evaluation.
        epochs: number of training epochs.
        save_dir: directory for checkpoints and the metrics figure.

    Returns:
        (model, history): the trained model and the recorded metric dict.
    """
    # Wire optimizer/loss/metric into the high-level fit() API.
    model.prepare(
        optimizer=paddle.optimizer.Adam(parameters=model.parameters(), learning_rate=0.001),
        loss=paddle.nn.CrossEntropyLoss(),
        metrics=paddle.metric.Accuracy()
    )

    # Callback that accumulates per-epoch metrics for plotting.
    history_callback = HistoryCallback()

    # NOTE: batch_size is intentionally not passed — both inputs are
    # already-batched DataLoaders, so fit() would ignore it anyway.
    model.fit(
        train_loader,
        val_loader,
        epochs=epochs,
        verbose=1,
        save_dir=os.path.join(save_dir, "checkpoints"),
        callbacks=[history_callback]
    )

    # Pull the recorded history out of the callback and plot it.
    history = history_callback.history
    plot_training_metrics(history, save_dir)

    # Final held-out evaluation after training completes.
    eval_result = model.evaluate(val_loader)
    print(f"最终评估结果: {eval_result}")

    return model, history

# Script entry point: train, visualize, then export the inference model.
if __name__ == "__main__":
    model, history = visualize_training_results(
        model=model,
        train_loader=train_loader,
        val_loader=test_loader,
        epochs=2,
        save_dir="output"
    )

    # Make sure the export directory exists before saving — presumably
    # model.save does not guarantee creating "Model/"; TODO confirm.
    os.makedirs("Model", exist_ok=True)
    # training=False exports the inference-mode model.
    model.save("Model/Garbage01", training=False)
