Upload 2 files
- WaterFlowCounter.json +0 -0
- WaterFlowCountersRecognition.py +24 -15
WaterFlowCounter.json
CHANGED
The diff for this file is too large to render. See raw diff.
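The annotation file is a VGG Image Annotator (VIA) project export. Its diff cannot be rendered here, but the loading script below only touches a few of its keys; the sketch that follows shows the assumed shape of one entry (placeholder file names and coordinates, not actual records from the dataset), written as a Python dict so the relevant fields can be annotated:

# Illustrative shape only -- values are placeholders inferred from the keys the loader reads.
annotations = {
    "_via_image_id_list": ["photo_001.jpg123456"],           # referenced only by a commented-out debug print
    "_via_img_metadata": {
        "photo_001.jpg123456": {
            "filename": "photo_001.jpg",                      # matched against files in the extracted photo archive
            "regions": [
                {
                    "shape_attributes": {
                        "all_points_x": [10, 120, 120, 10],   # polygon vertices of the region
                        "all_points_y": [15, 15, 90, 90],
                    },
                    "region_attributes": {
                        "name": {"value_a": True},            # key mapped through _REGION_NAME
                        "rotated": {"0": True},               # key mapped through _REGION_ROTETION, or a plain int
                    },
                },
            ],
        },
    },
}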
WaterFlowCountersRecognition.py
CHANGED
@@ -1,5 +1,6 @@
 import json
 import os
+import collections
 
 import datasets
 
@@ -21,13 +22,13 @@ _REGION_NAME = ['value_a', 'value_b', 'serial']
 
 _REGION_ROTETION = ['0', '90', '180', '270']
 
-
+_ANNOTATIONS_PATH = "WaterFlowCounter.json"
 
 
 class WaterFlowCounterConfig(datasets.BuilderConfig):
     """Builder Config for WaterFlowCounter"""
 
-    def __init__(self, data_url,
+    def __init__(self, data_url, **kwargs):
         """BuilderConfig for WaterFlowCounter.
         Args:
             data_url: `string`, url to download the photos.
@@ -36,7 +37,7 @@ class WaterFlowCounterConfig(datasets.BuilderConfig):
         """
         super(WaterFlowCounterConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
         self.data_url = data_url
-
+
 
 
 class WaterFlowCounter(datasets.GeneratorBasedBuilder):
@@ -47,10 +48,10 @@ class WaterFlowCounter(datasets.GeneratorBasedBuilder):
             name="WFCR_full",
             description="Full dataset which contains coordinates and names of regions and information about rotation",
             data_url={
-                "train": "
-                "test": "
+                "train": "data/train_photos.zip",
+                "test": "data/test_photos.zip",
             },
-
+
         )
     ]
 
@@ -78,26 +79,25 @@ class WaterFlowCounter(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         data_files = dl_manager.download_and_extract(self.config.data_url)
-        metadata_files = dl_manager.download_and_extract(self.config.metadata_url)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "folder_dir": data_files["train"],
-
+
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
                     "folder_dir": data_files["test"],
-
+
                 },
            )
         ]
 
-    def
+    def _generate_examples(self, folder_dir):
         name_to_id = {}
         rotation_to_id = {}
 
@@ -108,27 +108,36 @@ class WaterFlowCounter(datasets.GeneratorBasedBuilder):
             rotation_to_id[name] = indx
 
 
-        with open(
+        with open(_ANNOTATIONS_PATH, "r", encoding='utf-8') as f:
             annotations = json.load(f)
+
+        #print(annotations['_via_image_id_list'][0:5])
 
         for file in os.listdir(folder_dir):
             filepath = os.path.join(folder_dir, file)
+            #print(filepath)
             with open(filepath, "rb") as f:
                 image_bytes = f.read()
+            #print(image_bytes)
+            idx = 0
             all_x = []
             all_y = []
             names = []
 
             for el in annotations['_via_img_metadata']:
+
                 if annotations['_via_img_metadata'][el]['filename'] == file:
+                    print(f'\n***********{el}***************')
                     for region in annotations['_via_img_metadata'][el]['regions']:
                         all_x.append(region['shape_attributes']['all_points_x'])
                         all_y.append(region['shape_attributes']['all_points_y'])
                         names.append(name_to_id[list(region['region_attributes']['name'].keys())[0]])
-
-
+                        try:
+                            rotated = [rotation_to_id[list(region['region_attributes']['rotated'].keys())[0]]]
+                        except:
+                            rotated = [int(region['region_attributes']['rotated'])]
+                        print(f'****{names, rotated}******')
+
             yield idx, {
                 "image": {"path": filepath, "bytes": image_bytes},
                 "regions": {
|