#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""HuggingFace `datasets` loading script for the MMOD2 segmentation dataset.

The downloaded archive contains ``data/images/<stem>.jpg`` input images and
``data/annotations/<stem>.mat`` MATLAB annotation files holding polygon
vertices.  Split membership comes from ``train.txt`` / ``validation.txt``,
one file stem per line.
"""

from os.path import basename  # noqa: F401 -- kept; file may be partially shown elsewhere
import random  # noqa: F401
import scipy.io
import datasets
import numpy as np  # noqa: F401
from PIL import Image

_HOMEPAGE = ""
_CITATION = ""
_DESCRIPTION = ""
_LICENSE = ""
_IMAGES_DIR = "images"
_ANNOTATIONS_DIR = "annotations"
_BASE_URL = "data.zip"
# RGB colors for painting labels, one per class (only index 0 is used below).
_LABEL_COLORS = [(59, 193, 246), (222, 168, 51), (161, 78, 69)]
_METADATA_URLS = [
    "train.txt",
    "validation.txt",
]


class Mmod2(datasets.GeneratorBasedBuilder):
    """Builder for the MMOD2 image-segmentation dataset."""

    def _info(self):
        """Declare the dataset schema: an input image and a label image."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.Image(),
                }
            ),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the archive and split lists, then build the two splits.

        Side effect: stores the extracted archive root on ``self.data_dir``
        for use by ``_generate_examples``.
        """
        split_metadata_paths = dl_manager.download(_METADATA_URLS)
        # Download + extract exactly once (the original code also issued a
        # separate, unused dl_manager.download(_BASE_URL)).
        self.data_dir = dl_manager.download_and_extract(_BASE_URL)

        def _read_split(path):
            # One file stem per line.  Drop blank lines (e.g. from a trailing
            # newline) so we never later try to open "<dir>/.jpg".
            with open(path, encoding="utf-8") as f:
                return {line.strip() for line in f if line.strip()}

        train_files = _read_split(split_metadata_paths[0])
        validation_files = _read_split(split_metadata_paths[1])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split_key": "train",
                    "images": train_files,
                    "metadata_path": split_metadata_paths[0],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split_key": "validation",
                    "images": validation_files,
                    "metadata_path": split_metadata_paths[1],
                },
            ),
        ]

    def _generate_examples(self, images, metadata_path=None, split_key="train"):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            images: set of file stems belonging to this split.
            metadata_path: path of the split list file (unused; kept for
                interface compatibility with the split generators).
            split_key: ``"train"`` or ``"validation"`` (unused; kept for
                interface compatibility).
        """
        for file_name in images:
            image_path = f"{self.data_dir}/data/{_IMAGES_DIR}/{file_name}.jpg"
            # [0][0][0] unwraps MATLAB struct/cell nesting produced by
            # loadmat down to the array of annotation points.
            annotation = scipy.io.loadmat(
                f"{self.data_dir}/data/{_ANNOTATIONS_DIR}/{file_name}.mat"
            )["annotations"][0][0][0]
            image = Image.open(image_path)
            yield file_name, {
                "image": image,
                "label": self.decode_labels(image, annotation),
            }

    def decode_labels(self, image: Image.Image, mask, num_images=1, num_classes=3):
        """Paint the annotation onto a copy of *image*.

        Args:
            image: source image; it is copied, never modified in place.
            mask: sequence of ``(x, y)`` vertices (e.g. the array loaded from
                the ``.mat`` annotation file).
            num_images: unused; kept for backward compatibility.
            num_classes: unused; kept for backward compatibility.

        Returns:
            A copy of *image* where, for each pair of consecutive vertices,
            the axis-aligned rectangle spanned by them is filled with
            ``_LABEL_COLORS[0]``.

        NOTE(review): when consecutive vertices share an x or y coordinate
        the corresponding ``range`` is empty, so perfectly horizontal or
        vertical segments paint nothing — confirm this is intended.
        """
        img = image.copy()
        pixels = img.load()
        width, height = img.size
        for point_index in range(len(mask) - 1):
            px = int(mask[point_index][0])
            py = int(mask[point_index][1])
            next_px = int(mask[point_index + 1][0])
            next_py = int(mask[point_index + 1][1])
            x_step = 1 if px < next_px else -1
            y_step = 1 if py < next_py else -1
            for ix in range(px, next_px, x_step):
                for iy in range(py, next_py, y_step):
                    # Guard both bounds: the original only checked the upper
                    # edge, letting negative vertices index out of range.
                    if 0 <= ix < width and 0 <= iy < height:
                        pixels[ix, iy] = _LABEL_COLORS[0]
        return img