Datasets:
from datetime import datetime, timedelta

import datasets
import pandas as pd
import requests

# Note: datetime, timedelta, and requests are only used by the commented-out
# data-collection code in _generate_examples below.
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
  title  = {A great new dataset},
  author = {huggingface, Inc.},
  year   = {2020}
}
"""
_DESCRIPTION = """\
This dataset contains traffic images from traffic-signal cameras in Singapore. The images were captured at 1.5-minute intervals from 6 pm to 7 pm every day during January 2024.
"""
_HOMEPAGE = "https://beta.data.gov.sg/collections/354/view"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
class TrafficImages(datasets.GeneratorBasedBuilder):
    """Traffic images from Singapore traffic-signal cameras, captured 6-7 pm daily in January 2024."""

    VERSION = datasets.Version("1.1.0")
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "timestamp": datasets.Value("string"),
                    "camera_id": datasets.Value("string"),
                    "latitude": datasets.Value("float64"),
                    "longitude": datasets.Value("float64"),
                    "image_url": datasets.Image(),
                    "image_metadata": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
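
    # Illustrative shape of one record (the values below are hypothetical,
    # mirroring the field layout of the data.gov.sg traffic-images API used
    # in the commented-out collection code further down):
    #   {
    #       "timestamp": "2024-01-01T18:00:00+08:00",
    #       "camera_id": "1001",
    #       "latitude": 1.29531332,
    #       "longitude": 103.871146,
    #       "image_url": "https://images.data.gov.sg/api/traffic-images/...",
    #       "image_metadata": "...",
    #   }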
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        # The URL points to the pre-collected CSV in the GitHub repository;
        # download_and_extract returns the local path of the cached download.
        urls_to_download = {
            "csv_file": "https://raw.githubusercontent.com/Sayali-pingle/HuggingFace--Traffic-Image-Dataset/main/camera_data.csv"
        }
        downloaded_file = dl_manager.download_and_extract(urls_to_download["csv_file"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "csv_file_path": downloaded_file,
                },
            ),
        ]
    def _generate_examples(self, csv_file_path):
        # Yields (key, example) tuples from the pre-collected CSV.
        #
        # For reference, the commented-out code below shows how the camera
        # data can be collected from the data.gov.sg traffic-images API:
        #
        # start_date = datetime(2024, 1, 1, 18, 0, 0)
        # end_date = datetime(2024, 1, 2, 19, 0, 0)
        # interval_seconds = 240
        # date_time_strings = [
        #     (current_date + timedelta(seconds=seconds)).strftime('%Y-%m-%dT%H:%M:%S+08:00')
        #     for current_date in pd.date_range(start=start_date, end=end_date, freq='D')
        #     for seconds in range(0, 3600, interval_seconds)
        # ]
        # url = 'https://api.data.gov.sg/v1/transport/traffic-images'
        # camera_data = []
        # for date_time in date_time_strings:
        #     params = {'date_time': date_time}
        #     response = requests.get(url, params=params)
        #     if response.status_code == 200:
        #         data = response.json()
        #         camera_data.extend([
        #             {
        #                 'timestamp': item['timestamp'],
        #                 'camera_id': camera['camera_id'],
        #                 'latitude': camera['location']['latitude'],
        #                 'longitude': camera['location']['longitude'],
        #                 'image_url': camera['image'],
        #                 'image_metadata': camera['image_metadata']
        #             }
        #             for item in data['items']
        #             for camera in item['cameras']
        #         ])
        #     else:
        #         print(f"Error: {response.status_code}")
        camera_data = pd.read_csv(csv_file_path)
        for idx, example in camera_data.iterrows():
            yield idx, {
                "timestamp": example["timestamp"],
                "camera_id": example["camera_id"],
                "latitude": example["latitude"],
                "longitude": example["longitude"],
                "image_url": example["image_url"],
                "image_metadata": example["image_metadata"],
            }
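
A minimal usage sketch, assuming the script above is saved locally as traffic_images.py (recent versions of the datasets library may also require passing trust_remote_code=True when loading script-based datasets):

from datasets import load_dataset

# Builds the single TRAIN split defined in _split_generators.
ds = load_dataset("traffic_images.py", split="train")
row = ds[0]
print(row["camera_id"], row["timestamp"])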