|
|
|
"""Untitled6.ipynb |
|
|
|
Automatically generated by Colab. |
|
|
|
Original file is located at |
|
https://colab.research.google.com/drive/1b3-0ogrDvdw3WtHwOta0Ihlo46MAwDsk |
|
""" |
|
|
|
!pip install requests transformers |
|
|
|
import requests |
|
import zipfile |
|
import os |
|
|
|
|
|
# NOTE(review): this URL points at a Hugging Face dataset *page*, not a raw
# zip archive, so the Content-Type check below will almost certainly fail.
# A direct file URL looks like:
#   https://huggingface.co/datasets/<user>/<repo>/resolve/main/<file>.zip
zip_url = 'https://huggingface.co/datasets/Dabococo/wheeloh_dataset/images'

# Local path where the downloaded archive is written.
zip_file = 'images.zip'

# Download the archive. Fail fast on HTTP errors (404/403/...) instead of
# silently writing an error page to disk; bound the request with a timeout.
response = requests.get(zip_url, timeout=60)
response.raise_for_status()

# headers.get() returns None when the server sends no Content-Type header;
# default to '' so the membership test below cannot raise TypeError.
content_type = response.headers.get('Content-Type', '')

if 'zip' not in content_type:
    raise ValueError("Le fichier téléchargé n'est pas un fichier zip. Content-Type: {}".format(content_type))

# Persist the raw bytes to disk.
with open(zip_file, 'wb') as f:
    f.write(response.content)

file_size = os.path.getsize(zip_file)
print("Taille du fichier téléchargé:", file_size, "octets")

# Debug aid: peek at the first bytes (a real zip archive starts with b'PK').
with open(zip_file, 'rb') as f:
    print(f.read(100))

extract_dir = 'extracted_files'
os.makedirs(extract_dir, exist_ok=True)

# Extract everything; a corrupt/non-zip download is reported, not raised.
try:
    with zipfile.ZipFile(zip_file, 'r') as zip_ref:
        zip_ref.extractall(extract_dir)

    extracted_files = os.listdir(extract_dir)
    print("Fichiers extraits :", extracted_files)
except zipfile.BadZipFile:
    print("Erreur : le fichier téléchargé n'est pas un fichier zip valide.")
|
|
|
from torchvision.datasets import ImageFolder |
|
import os |
|
from torchvision.datasets.folder import has_file_allowed_extension, IMG_EXTENSIONS, default_loader |
|
|
|
class CustomImageFolder(ImageFolder):
    """ImageFolder variant that skips hidden entries (names starting with '.').

    Hidden class directories (e.g. '.ipynb_checkpoints' created by Colab)
    and hidden files (e.g. '.DS_Store') are excluded, which would otherwise
    produce spurious classes or unreadable samples.
    """

    def __init__(self, root, transform=None, loader=default_loader, is_valid_file=None):
        super().__init__(root, transform=transform, loader=loader, is_valid_file=is_valid_file)

    def find_classes(self, directory):
        """Return (classes, class_to_idx) for *directory*, ignoring hidden dirs.

        Classes are sorted alphabetically, so index assignment is deterministic.
        """
        classes = [d.name for d in os.scandir(directory) if d.is_dir() and not d.name.startswith('.')]
        classes.sort()
        class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
        return classes, class_to_idx

    def make_dataset(self, directory, class_to_idx, extensions=None, is_valid_file=None, allow_empty=False):
        """Build the list of (file_path, class_index) samples.

        Files whose name starts with '.' are skipped. Exactly one of
        *extensions* / *is_valid_file* must be provided; when *extensions*
        is given it takes precedence as the validity predicate.

        Raises:
            ValueError: if both *extensions* and *is_valid_file* are None.
            RuntimeError: if no valid file is found and *allow_empty* is False.
        """
        instances = []
        directory = os.path.expanduser(directory)
        both_none = extensions is None and is_valid_file is None
        if both_none:
            raise ValueError("Both extensions and is_valid_file cannot be None")
        if extensions is not None:
            # Deliberately shadows the parameter: extensions win when given.
            def is_valid_file(x):
                return has_file_allowed_extension(x, extensions)

        for target_class in sorted(class_to_idx.keys()):
            class_index = class_to_idx[target_class]
            target_dir = os.path.join(directory, target_class)
            if not os.path.isdir(target_dir):
                continue
            for root, _, fnames in sorted(os.walk(target_dir)):
                for fname in sorted(fnames):
                    path = os.path.join(root, fname)
                    if is_valid_file(path) and not fname.startswith('.'):
                        item = path, class_index
                        instances.append(item)

        if not allow_empty and len(instances) == 0:
            # Only mention extensions when they were given: ','.join(None)
            # would raise TypeError here and mask the real error.
            msg = f"Found 0 files in subfolders of: {directory}."
            if extensions is not None:
                msg += f" Supported extensions are: {','.join(extensions)}"
            raise RuntimeError(msg)

        return instances
|
|
|
import torch |
|
import torch.nn as nn |
|
import torch.optim as optim |
|
from torch.utils.data import DataLoader |
|
from torchvision import transforms, models |
|
from tqdm import tqdm |
|
|
|
|
|
# --- Hyperparameters ---
batch_size = 32
num_epochs = 10
learning_rate = 0.001
num_classes = 2  # Alpine vs Bugatti (see the inference cell below)

# Fall back to CPU when no GPU is present instead of crashing on .to('cuda').
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Standard ImageNet preprocessing — must match the pretrained ResNet-18 stats.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

train_dataset = CustomImageFolder(root='/content/dataset/train', transform=transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)

# ResNet-18 pretrained on ImageNet; replace the final FC layer for 2 classes.
model = models.resnet18(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, num_classes)
model = model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Mixed-precision training; automatically disabled on CPU, where CUDA AMP
# would otherwise raise at construction time.
scaler = torch.cuda.amp.GradScaler(enabled=(device.type == 'cuda'))

for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    for inputs, labels in tqdm(train_loader):
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()

        with torch.cuda.amp.autocast(enabled=(device.type == 'cuda')):
            outputs = model(inputs)
            loss = criterion(outputs, labels)

        # Scaled backward/step; the scaler is a transparent no-op when disabled.
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()

        # Accumulate the un-averaged loss so epoch_loss is a true per-sample mean.
        running_loss += loss.item() * inputs.size(0)

    epoch_loss = running_loss / len(train_loader.dataset)
    print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {epoch_loss:.4f}')

print('Finished Training')

# Checkpoint consumed by the inference and upload cells below.
torch.save(model.state_dict(), 'model.pth')
|
|
|
|
|
|
|
import torch |
|
import torch.nn as nn |
|
from torchvision import models, transforms |
|
from PIL import Image |
|
|
|
|
|
num_classes = 2
# Run on GPU when available; otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Rebuild the architecture only — weights come from the checkpoint, so no
# pretrained download is needed here.
model = models.resnet18(pretrained=False)
model.fc = nn.Linear(model.fc.in_features, num_classes)
model = model.to(device)

# map_location lets a checkpoint saved on GPU load on a CPU-only machine.
model.load_state_dict(torch.load('model.pth', map_location=device))
model.eval()

# Must match the training-time preprocessing exactly.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])


def load_image(image_path):
    """Load an image file and return a normalized (1, 3, 224, 224) tensor."""
    image = Image.open(image_path).convert('RGB')
    image = transform(image)
    image = image.unsqueeze(0)  # add the batch dimension the model expects
    return image


image_path = '/content/lg.jpeg'
image = load_image(image_path).to(device)

# Inference only: no gradients needed.
with torch.no_grad():
    outputs = model(image)
    _, predicted = torch.max(outputs, 1)

# Index order must match the sorted class folders used at training time
# (CustomImageFolder sorts class directory names alphabetically).
classes = ['Alpine', 'Bugatti']
predicted_class = classes[predicted.item()]
print(f'Predicted class: {predicted_class}')
|
|
|
!pip install transformers huggingface_hub |
|
|
|
!huggingface-cli login |
|
|
|
from huggingface_hub import HfApi, HfFolder, Repository |
|
|
|
|
|
# The training cell above saves the checkpoint as 'model.pth'; the previous
# value "Wheeloh-model_1.pth" pointed at a file that is never created, so
# the shutil.copy below raised FileNotFoundError.
model_path = "model.pth"
repo_name = "Wheeloh-model_1"
commit_message = "Initial commit"

api = HfApi()

# Token stored locally by `huggingface-cli login`.
token = HfFolder.get_token()

# Create (or reuse, thanks to exist_ok) the remote repo, then clone it locally.
repo_url = api.create_repo(repo_name, token=token, exist_ok=True)
# NOTE(review): Repository is deprecated in recent huggingface_hub releases;
# api.upload_file(...) is the recommended replacement — confirm library version.
repo = Repository(local_dir=repo_name, clone_from=repo_url)

# Copy the checkpoint into the cloned repo directory before pushing.
import shutil
shutil.copy(model_path, repo_name)

repo.push_to_hub(commit_message=commit_message)