# week10.1 / app.py
# Source: EUNSEO56's Hugging Face Space, commit 16220d7 ("Create app.py")
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
# κΈ°λ³Έ λͺ¨λΈ λ‘œλ“œ
base_model = torchvision.models.vqa_resnet_finetune(pretrained=True)
# OK-VQA 데이터셋 λ‘œλ“œ 및 μ „μ²˜λ¦¬
# (μ—¬κΈ°μ—μ„œλŠ” 데이터λ₯Ό λ‘œλ“œν•˜λŠ” μ½”λ“œμ™€ μ „μ²˜λ¦¬ 과정을 κ°„λž΅ν•˜κ²Œ ν‘œν˜„ν•©λ‹ˆλ‹€)
train_dataset = OKVQADataset('train_data.json', transform=transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()]))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
# μƒˆλ‘œμš΄ λ ˆμ΄μ–΄ μΆ”κ°€
num_classes = len(train_dataset.classes) # μ˜ˆμ‹œμ—μ„œλŠ” λ°μ΄ν„°μ…‹μ˜ 클래슀 수λ₯Ό μ‚¬μš©
base_model.fc = nn.Linear(base_model.fc.in_features, num_classes)
# GPU μ‚¬μš© κ°€λŠ₯ μ‹œ GPU둜 λͺ¨λΈ 이동
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
base_model = base_model.to(device)
# Loss 및 Optimizer μ •μ˜
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(base_model.parameters(), lr=0.001)
# Fine-tuning loop.
num_epochs = 10
base_model.train()  # be explicit: batchnorm/dropout in training mode
for epoch in range(num_epochs):
    running_loss = 0.0
    num_batches = 0
    for inputs, labels in train_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = base_model(inputs)       # forward pass
        loss = criterion(outputs, labels)
        loss.backward()                    # backprop
        optimizer.step()
        running_loss += loss.item()
        num_batches += 1
    # Report the mean loss over the whole epoch. The original print showed only
    # the final batch's loss, which is a noisy and misleading epoch metric.
    avg_loss = running_loss / max(num_batches, 1)  # guard against an empty loader
    print(f'Epoch {epoch+1}/{num_epochs}, Loss: {avg_loss}')
# Persist the fine-tuned weights (state_dict only, not the full module).
torch.save(base_model.state_dict(), 'git-vqa-finetuned-on-ok-vqa.pth')