rshrott committed on
Commit
1af3915
1 Parent(s): d61a5ba

Update renovation.py

Browse files
Files changed (1) hide show
  1. renovation.py +56 -53
renovation.py CHANGED
@@ -1,56 +1,37 @@
1
- # coding=utf-8
2
- # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """Beans leaf dataset with images of diseased and health leaves."""
16
-
17
  import os
18
 
19
- import datasets
 
20
  from datasets.tasks import ImageClassification
21
 
22
 
23
- _HOMEPAGE = "https://github.com/AI-Lab-Makerere/ibean/"
24
 
25
  _CITATION = """\
26
- @ONLINE {beansdata,
27
- author="Makerere AI Lab",
28
- title="Bean disease dataset",
29
- month="January",
30
- year="2020",
31
- url="https://github.com/AI-Lab-Makerere/ibean/"
32
  }
33
  """
34
 
35
  _DESCRIPTION = """\
36
- Beans is a dataset of images of beans taken in the field using smartphone
37
- cameras. It consists of 3 classes: 2 disease classes and the healthy class.
38
- Diseases depicted include Angular Leaf Spot and Bean Rust. Data was annotated
39
- by experts from the National Crops Resources Research Institute (NaCRRI) in
40
- Uganda and collected by the Makerere AI research lab.
41
  """
42
 
43
- _URLS = {
44
- "train": "https://huggingface.co/datasets/beans/resolve/main/data/train.zip",
45
- "validation": "https://huggingface.co/datasets/beans/resolve/main/data/validation.zip",
46
- "test": "https://huggingface.co/datasets/beans/resolve/main/data/test.zip",
47
- }
48
 
49
- _NAMES = ["angular_leaf_spot", "bean_rust", "healthy"]
50
 
 
 
51
 
52
- class Beans(datasets.GeneratorBasedBuilder):
53
- """Beans plant leaf images dataset."""
54
 
55
  def _info(self):
56
  return datasets.DatasetInfo(
@@ -59,44 +40,66 @@ class Beans(datasets.GeneratorBasedBuilder):
59
  {
60
  "image_file_path": datasets.Value("string"),
61
  "image": datasets.Image(),
62
- "labels": datasets.features.ClassLabel(names=_NAMES),
63
  }
64
  ),
65
- supervised_keys=("image", "labels"),
66
  homepage=_HOMEPAGE,
67
  citation=_CITATION,
68
- task_templates=[ImageClassification(image_column="image", label_column="labels")],
69
  )
70
 
71
  def _split_generators(self, dl_manager):
72
- data_files = dl_manager.download_and_extract(_URLS)
73
  return [
74
  datasets.SplitGenerator(
75
  name=datasets.Split.TRAIN,
76
  gen_kwargs={
77
- "files": dl_manager.iter_files([data_files["train"]]),
 
78
  },
79
  ),
80
  datasets.SplitGenerator(
81
  name=datasets.Split.VALIDATION,
82
  gen_kwargs={
83
- "files": dl_manager.iter_files([data_files["validation"]]),
 
84
  },
85
  ),
86
  datasets.SplitGenerator(
87
  name=datasets.Split.TEST,
88
  gen_kwargs={
89
- "files": dl_manager.iter_files([data_files["test"]]),
 
90
  },
91
  ),
92
  ]
93
 
94
- def _generate_examples(self, files):
95
- for i, path in enumerate(files):
96
- file_name = os.path.basename(path)
97
- if file_name.endswith(".jpg"):
98
- yield i, {
99
- "image_file_path": path,
100
- "image": path,
101
- "labels": os.path.basename(os.path.dirname(path)).lower(),
102
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Renovation Quality Dataset loading script.

Streams property photos (fetched by URL from a labels CSV) with a 3-way
renovation-quality label: 'cheap', 'average', or 'expensive'.
"""

import csv
import os
from io import BytesIO

import datasets
import requests
from datasets.tasks import ImageClassification
from PIL import Image

_HOMEPAGE = "https://huggingface.co/datasets/rshrott/renovation"

_CITATION = """\
@ONLINE {renovationquality,
author="Your Name",
title="Renovation Quality Dataset",
month="Your Month",
year="Your Year",
url="https://huggingface.co/datasets/rshrott/renovation"
}
"""

_DESCRIPTION = """\
This dataset contains images of various properties, along with labels indicating the quality of renovation - 'cheap', 'average', 'expensive'.
"""

# CSV of `image_url,label` rows; the single source of truth for all splits.
_URL = "https://huggingface.co/datasets/rshrott/renovation/raw/main/labels.csv"

_NAMES = ["cheap", "average", "expensive"]


class RenovationQualityDataset(datasets.GeneratorBasedBuilder):
    """Renovation Quality Dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the schema: source URL string, decoded image, class label."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[ImageClassification(image_column="image", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Download the labels CSV once; every split reads the same file and
        slices it deterministically (80/10/10) in ``_generate_examples``."""
        csv_path = dl_manager.download(_URL)
        split_keys = (
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "validation"),
            (datasets.Split.TEST, "test"),
        )
        return [
            datasets.SplitGenerator(
                name=name,
                gen_kwargs={"filepath": csv_path, "split": key},
            )
            for name, key in split_keys
        ]

    @staticmethod
    def _split_rows(rows, split):
        """Return the deterministic slice of ``rows`` for ``split``:
        first 80% train, next 10% validation, final 10% test."""
        n = len(rows)
        if split == "train":
            return rows[: int(0.8 * n)]
        if split == "validation":
            return rows[int(0.8 * n) : int(0.9 * n)]
        return rows[int(0.9 * n) :]  # test

    def _generate_examples(self, filepath, split):
        """Yield ``(id, example)`` pairs for ``split``.

        Each CSV row is ``image_url,label``; the image is fetched over HTTP
        at generation time and decoded with PIL.
        """

        def url_to_image(url):
            # Bounded timeout + status check: without raise_for_status a
            # 404/500 HTML error body would be handed to PIL and fail with a
            # confusing decode error (or worse, silently mis-parse).
            response = requests.get(url, timeout=30)
            response.raise_for_status()
            return Image.open(BytesIO(response.content))

        # newline="" is the csv-module contract for open(); encoding pinned
        # so the script does not depend on the platform locale.
        with open(filepath, "r", newline="", encoding="utf-8") as f:
            reader = csv.reader(f)
            next(reader)  # skip header
            rows = self._split_rows(list(reader), split)

        for id_, row in enumerate(rows):
            if len(row) < 2:
                # Malformed row: report and skip rather than abort the build.
                print(f"Row with id {id_} has less than 2 elements: {row}")
                continue
            image_file_path = str(row[0])
            yield id_, {
                "image_file_path": image_file_path,
                "image": url_to_image(image_file_path),
                "label": row[1],
            }