Sayali9141 committed on
Commit
f2b317c
1 Parent(s): a4171d3

Update signals.py

Browse files
Files changed (1) hide show
  1. signals.py +62 -43
signals.py CHANGED
@@ -4,7 +4,9 @@ import os
4
  from typing import List
5
  import datasets
6
  import logging
7
-
 
 
8
  # TODO: Add BibTeX citation
9
  # Find for instance the citation on arxiv or on the dataset repo/website
10
  _CITATION = """\
@@ -31,13 +33,13 @@ _LICENSE = ""
31
  # TODO: Add link to the official dataset URLs here
32
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
33
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
34
- _URL = "https://github.com/Sayali-pingle/HuggingFace--Traffic-Image-Dataset/blob/main/camera_data.csv"
35
 
36
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
37
  class TrafficImages(datasets.GeneratorBasedBuilder):
38
  """TODO: Short description of my dataset."""
39
 
40
- #_URLS = _URLS
41
  VERSION = datasets.Version("1.1.0")
42
 
43
  def _info(self):
@@ -49,7 +51,7 @@ class TrafficImages(datasets.GeneratorBasedBuilder):
49
  "camera_id": datasets.Value("string"),
50
  "latitude": datasets.Value("float"),
51
  "longitude": datasets.Value("float"),
52
- "image_url": datasets.Value("string"),
53
  "image_metadata": datasets.Value("string")
54
  }
55
  ),
@@ -57,52 +59,70 @@ class TrafficImages(datasets.GeneratorBasedBuilder):
57
  citation=_CITATION,
58
  )
59
 
60
- def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
61
- urls_to_download = self._URL
62
- downloaded_files = dl_manager.download_and_extract(urls_to_download)
63
 
64
- return [
65
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
66
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
67
- ]
68
 
69
- def _generate_examples(self, file_path):
70
- # This method will yield examples from your dataset
71
- start_date = datetime(2024, 1, 1, 18, 0, 0)
72
- end_date = datetime(2024, 1, 31, 19, 0, 0)
73
- interval_seconds = 240
 
74
 
75
- date_time_strings = [
76
- (current_date + timedelta(seconds=seconds)).strftime('%Y-%m-%dT%H:%M:%S+08:00')
77
- for current_date in pd.date_range(start=start_date, end=end_date, freq='D')
78
- for seconds in range(0, 3600, interval_seconds)
 
 
 
79
  ]
 
 
 
 
 
 
80
 
81
- url = 'https://api.data.gov.sg/v1/transport/traffic-images'
82
- camera_data = []
 
 
 
83
 
84
- for date_time in date_time_strings:
85
- params = {'date_time': date_time}
86
- response = requests.get(url, params=params)
87
 
88
- if response.status_code == 200:
89
- data = response.json()
90
- camera_data.extend([
91
- {
92
- 'timestamp': item['timestamp'],
93
- 'camera_id': camera['camera_id'],
94
- 'latitude': camera['location']['latitude'],
95
- 'longitude': camera['location']['longitude'],
96
- 'image_url': camera['image'],
97
- 'image_metadata': camera['image_metadata']
98
- }
99
- for item in data['items']
100
- for camera in item['cameras']
101
- ])
102
- else:
103
- print(f"Error: {response.status_code}")
104
 
105
- for idx, example in enumerate(camera_data):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
  yield idx, {
107
  "timestamp": example["timestamp"],
108
  "camera_id": example["camera_id"],
@@ -111,4 +131,3 @@ class TrafficImages(datasets.GeneratorBasedBuilder):
111
  "image_url": example["image_url"],
112
  "image_metadata": example["image_metadata"]
113
  }
114
-
 
4
  from typing import List
5
  import datasets
6
  import logging
7
+ from datetime import datetime, timedelta
8
+ import pandas as pd
9
+ import requests
10
  # TODO: Add BibTeX citation
11
  # Find for instance the citation on arxiv or on the dataset repo/website
12
  _CITATION = """\
 
33
  # TODO: Add link to the official dataset URLs here
34
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
35
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
36
+ # _URL = "https://raw.githubusercontent.com/Sayali-pingle/HuggingFace--Traffic-Image-Dataset/main/camera_data.csv"
37
 
38
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
39
  class TrafficImages(datasets.GeneratorBasedBuilder):
40
  """TODO: Short description of my dataset."""
41
 
42
+ # _URLS = _URLS
43
  VERSION = datasets.Version("1.1.0")
44
 
45
  def _info(self):
 
51
  "camera_id": datasets.Value("string"),
52
  "latitude": datasets.Value("float"),
53
  "longitude": datasets.Value("float"),
54
+ "image_url": datasets.Image(),
55
  "image_metadata": datasets.Value("string")
56
  }
57
  ),
 
59
  citation=_CITATION,
60
  )
61
 
62
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
    """Download the pre-scraped camera-data CSV and define the dataset splits.

    Args:
        dl_manager: Hugging Face download manager used to fetch and cache
            the raw file.

    Returns:
        A single TRAIN split whose generator receives the local path of the
        downloaded CSV via the ``csv_file_path`` keyword argument.
    """
    # Must be the raw-file URL (raw.githubusercontent.com), not the GitHub
    # HTML page, so the download yields plain CSV rather than HTML.
    csv_url = (
        "https://raw.githubusercontent.com/Sayali-pingle/"
        "HuggingFace--Traffic-Image-Dataset/main/camera_data.csv"
    )
    csv_path = dl_manager.download_and_extract(csv_url)

    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "csv_file_path": csv_path,
            },
        ),
    ]
86
+
87
+ def _generate_examples(self, csv_file_path):
88
+ # This method will yield examples from your dataset
89
+ # start_date = datetime(2024, 1, 1, 18, 0, 0)
90
+ # end_date = datetime(2024, 1, 2, 19, 0, 0)
91
+ # interval_seconds = 240
92
 
93
+ # date_time_strings = [
94
+ # (current_date + timedelta(seconds=seconds)).strftime('%Y-%m-%dT%H:%M:%S+08:00')
95
+ # for current_date in pd.date_range(start=start_date, end=end_date, freq='D')
96
+ # for seconds in range(0, 3600, interval_seconds)
97
+ # ]
98
 
99
+ # url = 'https://api.data.gov.sg/v1/transport/traffic-images'
100
+ # camera_data = []
 
101
 
102
+ # for date_time in date_time_strings:
103
+ # params = {'date_time': date_time}
104
+ # response = requests.get(url, params=params)
 
 
 
 
 
 
 
 
 
 
 
 
 
105
 
106
+ # if response.status_code == 200:
107
+ # data = response.json()
108
+ # camera_data.extend([
109
+ # {
110
+ # 'timestamp': item['timestamp'],
111
+ # 'camera_id': camera['camera_id'],
112
+ # 'latitude': camera['location']['latitude'],
113
+ # 'longitude': camera['location']['longitude'],
114
+ # 'image_url': camera['image'],
115
+ # 'image_metadata': camera['image_metadata']
116
+ # }
117
+ # for item in data['items']
118
+ # for camera in item['cameras']
119
+ # ])
120
+ # else:
121
+ # print(f"Error: {response.status_code}")
122
+
123
+ camera_data= pd.read_csv(csv_file_path)
124
+
125
+ for idx, example in camera_data.iterrows():
126
  yield idx, {
127
  "timestamp": example["timestamp"],
128
  "camera_id": example["camera_id"],
 
131
  "image_url": example["image_url"],
132
  "image_metadata": example["image_metadata"]
133
  }