import torch
import torch.optim as optim
import torch.nn as nn
import torchvision
from torch.utils.data import DataLoader
from models.autoencoder import Autoencoder
from config import BATCH_SIZE, EPOCHS, LR

# Data loading (CIFAR-10). ToTensor converts PIL images to float tensors
# scaled to [0, 1], which matches the MSE reconstruction objective below.
transform = torchvision.transforms.Compose(
    [torchvision.transforms.ToTensor()]
)

# Training split of CIFAR-10; downloaded to ./data on the first run.
train_data = torchvision.datasets.CIFAR10(
    root='./data',
    train=True,
    download=True,
    transform=transform,
)

# Reshuffle every epoch; batch size comes from the project config.
train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)

# Model setup: prefer the GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Autoencoder().to(device)

# Pixel-wise MSE reconstruction loss for the autoencoder, optimized
# with Adam at the configured learning rate.
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=LR)

# Training loop: minimize per-pixel MSE between the input image and its
# reconstruction. Labels are discarded — this is unsupervised training.
model.train()  # fix: ensure dropout/batch-norm layers (if any) are in training mode
for epoch in range(EPOCHS):
    total_loss = 0.0
    for img, _ in train_loader:  # unpack directly; label is unused
        img = img.to(device)

        optimizer.zero_grad()
        output = model(img)
        loss = criterion(output, img)
        loss.backward()
        optimizer.step()

        # .item() detaches the scalar so the graph is freed each step.
        total_loss += loss.item()

    # Report the mean batch loss for the epoch.
    avg_loss = total_loss / len(train_loader)
    print(f"Epoch {epoch+1}/{EPOCHS}, Loss: {avg_loss:.4f}")

# Persist only the trained weights (state dict), not the full module object.
trained_weights = model.state_dict()
torch.save(trained_weights, "autoencoder.pth")
