# food_vision_199_classes / food_vision_199_classes.py
# Author: mrdbourke — "update food vision script" (commit 3bd1229)
"""
Loading script for the Food Vision 199 classes dataset.
See the template: https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py
See the example for Food101: https://huggingface.co/datasets/food101/blob/main/food101.py
See another example: https://huggingface.co/datasets/davanstrien/encyclopedia_britannica/blob/main/encyclopedia_britannica.py
"""
import logging
import os

import pandas as pd
import requests

import datasets
from datasets.tasks import ImageClassification
# Surface the installed library version when this loading script is imported
# (useful when debugging behaviour differences across `datasets` releases).
print(f"Datasets version: {datasets.__version__}")
# Maximum logging verbosity for debugging. `logging.DEBUG` == 10, the value
# that was previously hard-coded here — a named constant is self-documenting.
datasets.logging.set_verbosity(logging.DEBUG)
print(f"Verbosity level: {datasets.logging.get_verbosity()}")
# Dataset-card metadata.
_HOMEPAGE = "https://www.nutrify.app"
_LICENSE = "TODO"
_CITATION = "TODO"
_DESCRIPTION = "Images of 199 food classes from the Nutrify app."

# The 199 class names, in label-index (alphabetical) order. Kept inline so the
# script needs no extra download for label metadata; the same list also lives at
# https://huggingface.co/datasets/mrdbourke/food_vision_199_classes/blob/main/class_names.txt
_NAMES = (
    "almond_butter almonds apple apricot asparagus avocado bacon "
    "bacon_and_egg_burger bagel baklava banana banana_bread barbecue_sauce "
    "beans beef beef_curry beef_mince beef_stir_fry beer beetroot biltong "
    "blackberries blueberries bok_choy bread broccoli broccolini brownie "
    "brussel_sprouts burrito butter cabbage calamari candy capsicum carrot "
    "cashews cauliflower celery cheese cheeseburger cherries chicken_breast "
    "chicken_thighs chicken_wings chilli chimichurri chocolate chocolate_cake "
    "coconut coffee coleslaw cookies coriander corn corn_chips cream croissant "
    "crumbed_chicken cucumber cupcake daikon_radish dates donuts dragonfruit "
    "eggplant eggs enoki_mushroom fennel figs french_toast fried_rice fries "
    "fruit_juice garlic garlic_bread ginger goji_berries granola grapefruit "
    "grapes green_beans green_onion guacamole guava gyoza ham honey "
    "hot_chocolate ice_coffee ice_cream iceberg_lettuce jerusalem_artichoke "
    "kale karaage_chicken kimchi kiwi_fruit lamb_chops leek lemon lentils "
    "lettuce lime mandarin mango maple_syrup mashed_potato mayonnaise milk "
    "miso_soup mushrooms nectarines noodles nuts olive_oil olives omelette "
    "onion orange orange_juice oysters pain_au_chocolat pancakes papaya "
    "parsley parsnips passionfruit pasta pawpaw peach pear peas pickles "
    "pineapple pizza plum pomegranate popcorn pork_belly pork_chop pork_loins "
    "porridge potato_bake potato_chips potato_scallop potatoes prawns pumpkin "
    "radish ramen raspberries red_onion red_wine rhubarb rice roast_beef "
    "roast_pork roast_potatoes rockmelon rosemary salad salami salmon salsa "
    "salt sandwich sardines sausage_roll sausages scrambled_eggs seaweed "
    "shallots snow_peas soda soy_sauce spaghetti_bolognese spinach "
    "sports_drink squash starfruit steak strawberries sushi sweet_potato "
    "tacos tamarillo taro tea toast tofu tomato tomato_chutney tomato_sauce "
    "turnip watermelon white_onion white_wine yoghurt zucchini"
).split()
# Create Food199 class
class Food199(datasets.GeneratorBasedBuilder):
    """Food199 Images dataset: 199 food classes from the Nutrify app."""

    def _info(self):
        """Return dataset metadata: features, supervised keys and card info."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the annotations CSV and declare the dataset splits.

        The CSV is expected to contain at least the columns ``image``
        (URL/path), ``label`` (class name) and ``split`` (e.g. ``"train"``).
        """
        annotations_csv = dl_manager.download("annotations_with_links.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"annotations": annotations_csv, "split": "train"},
            ),
            # A test split can be re-enabled once the CSV contains test rows:
            # datasets.SplitGenerator(
            #     name=datasets.Split.TEST,
            #     gen_kwargs={"annotations": annotations_csv, "split": "test"},
            # ),
        ]

    def _generate_examples(self, annotations, split):
        """Yield ``(id, {"image": ..., "label": ...})`` examples for *split*.

        Args:
            annotations: Local path to the downloaded annotations CSV.
            split: Split name, matched against the CSV ``split`` column.
        """
        annotations_df = pd.read_csv(annotations, low_memory=False)
        # Filter once on the requested split instead of duplicating the logic
        # per split name. This also fixes the original behaviour for an
        # unrecognised split, which left `annotations` as the CSV path string
        # and raised a TypeError while iterating over its characters; now an
        # unknown split simply yields no examples.
        rows = annotations_df.loc[annotations_df["split"] == split, ["image", "label"]]
        for id_, (image, label) in enumerate(zip(rows["image"], rows["label"])):
            yield id_, {"image": str(image), "label": label}