File size: 6,532 Bytes
7c96b72
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
import os
import pandas as pd
import datasets
from os.path import join

# convert these to features
#id,latitude,longitude,thumb_original_url,country,sequence,captured_at,lon_bin,lat_bin,cell,region,sub-region,city,land_cover,road_index,drive_side,climate,soil,dist_sea,quadtree_10_5000,quadtree_10_25000,quadtree_10_1000,quadtree_10_50000,quadtree_10_12500,quadtree_10_500,quadtree_10_2500,unique_region,unique_sub-region,unique_city,unique_country,creator_username,creator_id
#3859149887465501,-43.804769384023,-176.61409250805,,8,"(0, 8)",Chatham Islands,,Waitangi,4,4.661764145,1,15,3,0.0068841379890803,0,0,0,0,0,0,0,Chatham Islands_NZ,,Waitangi_NaN_Chatham Islands_NZ,NZ,roadroid,111336221091714.0

class OSV5M(datasets.GeneratorBasedBuilder):
    """Dataset builder for the OSV-5M street-view geolocation dataset.

    Joins zipped street-level images with a per-split metadata CSV keyed
    by image id. Pass ``full=True`` to expose every CSV column as a
    feature; otherwise only coordinates and administrative labels
    (country / region / sub-region / city) are kept.
    """

    # Number of image zip shards per split (images/<split>/NN.zip).
    N_TRAIN_SHARDS = 98
    N_TEST_SHARDS = 5

    def __init__(self, *args, **kwargs):
        # Pop 'full' before the base class sees kwargs so callers can mix
        # it freely with the usual GeneratorBasedBuilder arguments.
        self.full = kwargs.pop('full', False)
        super().__init__(*args, **kwargs)

    def _info(self):
        """Return the feature schema; shape depends on the ``full`` flag."""
        if self.full:
            return datasets.DatasetInfo(
                features=datasets.Features(
                    {
                        "image": datasets.Image(),
                        "latitude": datasets.Value("float32"),
                        "longitude": datasets.Value("float32"),
                        "thumb_original_url": datasets.Value("string"),
                        "country": datasets.Value("string"),
                        "sequence": datasets.Value("string"),
                        "captured_at": datasets.Value("string"),
                        "lon_bin": datasets.Value("float32"),
                        "lat_bin": datasets.Value("float32"),
                        "cell": datasets.Value("string"),
                        "region": datasets.Value("string"),
                        "sub-region": datasets.Value("string"),
                        "city": datasets.Value("string"),
                        "land_cover": datasets.Value("float32"),
                        "road_index": datasets.Value("float32"),
                        "drive_side": datasets.Value("float32"),
                        "climate": datasets.Value("float32"),
                        "soil": datasets.Value("float32"),
                        "dist_sea": datasets.Value("float32"),
                        "quadtree_10_5000": datasets.Value("int32"),
                        "quadtree_10_25000": datasets.Value("int32"),
                        "quadtree_10_1000": datasets.Value("int32"),
                        "quadtree_10_50000": datasets.Value("int32"),
                        "quadtree_10_12500": datasets.Value("int32"),
                        "quadtree_10_500": datasets.Value("int32"),
                        "quadtree_10_2500": datasets.Value("int32"),
                        "unique_region": datasets.Value("string"),
                        "unique_sub-region": datasets.Value("string"),
                        "unique_city": datasets.Value("string"),
                        "unique_country": datasets.Value("string"),
                        "creator_username": datasets.Value("string"),
                        "creator_id": datasets.Value("string"),
                    }
                )
            )
        else:
            return datasets.DatasetInfo(
                features=datasets.Features(
                    {
                        "image": datasets.Image(),
                        "latitude": datasets.Value("float32"),
                        "longitude": datasets.Value("float32"),
                        "country": datasets.Value("string"),
                        "region": datasets.Value("string"),
                        "sub-region": datasets.Value("string"),
                        "city": datasets.Value("string"),
                    }
                )
            )

    def df(self, annotation_path):
        """Load (and memoize) the metadata CSV as a dict keyed by image id.

        Returns a mapping ``{id: {column: value}}`` so that the per-image
        metadata lookup in ``_generate_examples`` is O(1). The parsed
        result is cached per annotation path on the instance.
        """
        # Lazily create the per-path cache on first use.
        if not hasattr(self, 'df_'):
            self.df_ = {}
        if annotation_path not in self.df_:
            # Explicit dtypes: ids stay strings (avoid float mangling of
            # large numeric ids) and label columns keep a stable type.
            df = pd.read_csv(annotation_path, dtype={
                'id': str, 'creator_id': str, 'creator_username': str, 
                'unique_country': str, 'unique_city': str, 'unique_sub-region': str, 'unique_region': str,
                'quadtree_10_2500': int, 'quadtree_10_500': int, 'quadtree_10_12500': int, 'quadtree_10_50000': int, 'quadtree_10_1000': int, 'quadtree_10_25000': int, 'quadtree_10_5000': int,
                'dist_sea': float, 'soil': float, 'climate': float, 'drive_side': float, 'road_index': float, 'land_cover': float, 'city': str, 'sub-region': str, 'region': str, 'cell': str, 'lat_bin': float, 'lon_bin': float, 'captured_at': str, 'sequence': str, 'country': str, 'thumb_original_url': str, 'longitude': float, 'latitude': float
            })
            if not self.full:
                # Keep only the lightweight label columns for the compact schema.
                df = df[['id', 'latitude', 'longitude', 'country', 'region', 'sub-region', 'city']]

            df = df.set_index('id')
            self.df_[annotation_path] = df.to_dict('index')
        return self.df_[annotation_path]

    def _split_generators(self, dl_manager):
        """Download image shards plus metadata CSVs and define train/test splits."""
        _URLS = {
            "train": [join('images', 'train', str(i).zfill(2) + '.zip') for i in range(self.N_TRAIN_SHARDS)],
            "test": [join('images', 'test', str(i).zfill(2) + '.zip') for i in range(self.N_TEST_SHARDS)],
            "train_meta": "train.csv",
            "test_meta": "test.csv",
        }

        data_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "image_paths": dl_manager.iter_files(data_files["train"]),
                    "annotation_path": data_files["train_meta"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "image_paths": dl_manager.iter_files(data_files["test"]),
                    "annotation_path": data_files["test_meta"],
                },
            ),
        ]

    def _generate_examples(self, image_paths, annotation_path):
        """Yield ``(idx, example)`` pairs joining each image with its metadata row."""
        df = self.df(annotation_path)
        for idx, image_path in enumerate(image_paths):
            # The image id is the file basename without its extension.
            info_id = os.path.splitext(os.path.split(image_path)[-1])[0]
            try:
                example = {
                    "image": image_path,
                } | df[info_id]
            except KeyError as e:
                # Best-effort: skip images with no metadata row rather than
                # aborting the whole generation pass.
                print('Exception ' + str(e), info_id, idx, image_path, sep='\n')
                continue

            yield idx, example