fedric95 committed
Commit 8978b0a · verified · 1 Parent(s): a0a907d

Delete download.py

Files changed (1)
  1. download.py +0 -153
download.py DELETED
@@ -1,153 +0,0 @@
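- # Download the Umbra GEC images listed in data.json, tile them, and upload
- # everything to the fedric95/umbra dataset repo on the Hugging Face Hub.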
- import json
- import os
- import collections
- from datetime import datetime
- from multiprocessing import cpu_count
- from multiprocessing.pool import ThreadPool
-
- import pandas as pd
- import rasterio as rio
- import requests
- from huggingface_hub import HfApi
- from tqdm import tqdm
-
- from extract import extract
- from tiling import get_tiles
-
-
- def download_image(args):
-     """Download a single image to args['image'], skipping files that already exist."""
-     url, fn = args['image_href'], args['image']
-
-     if not os.path.exists(fn):
-         try:
-             r = requests.get(url)
-             # Fail early on HTTP errors instead of writing an error page to disk.
-             r.raise_for_status()
-             with open(fn, 'wb') as f:
-                 f.write(r.content)
-         except Exception as e:
-             print('Exception in download_image():', e)
-             return url
-
-     # Quick sanity check: log the CRS of the image on disk.
-     with rio.open(fn, 'r') as ds:
-         print(args['date'], args['id'], ds.crs.to_proj4())
-
-     return url
-
-
- if __name__ == '__main__':
-     api = HfApi()
-
-     image_dir = './dataset/image'
-
-     # Build the catalog once, then load it.
-     if not os.path.exists('data.json'):
-         extract('data.json')
-
-     with open('data.json') as f:
-         data = json.load(f)
-
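-     # Some catalog entries repeat. Verify that features sharing an id are
-     # byte-identical, then keep a single copy of each.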
-     ids = [f['id'] for f in data['features']]
-     duplicated = [item for item, count in collections.Counter(ids).items() if count > 1]
-     for duplicated_instance in duplicated:
-         items = []
-         for f in data['features']:
-             if f['id'] == duplicated_instance:
-                 items.append(json.dumps(f))
-         # All elements sharing the same id must be completely identical.
-         assert len(collections.Counter(items).keys()) == 1, 'Unexpected duplicated item'
-
-     # Keep the features that are unique at the content level.
-     data['features'] = [json.loads(f) for f in set(json.dumps(f) for f in data['features'])]
-     # data['features'] = data['features'][:2]  # uncomment to test on a small subset
-
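-     # Build one record per feature that exposes a GEC product, keeping only
-     # acquisitions from 2024 onwards.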
-     records = []
-     for feature in tqdm(data['features']):
-         gec = feature['assets'].get('GEC')
-         if gec is None:
-             continue
-
-         metadata = feature['assets']['metadata']['content']
-         assert len(metadata['collects']) == 1, 'Unexpected situation'
-         assert len(metadata['derivedProducts']['GEC']) == 1, 'Unexpected situation'
-
-         parsed_date = datetime.fromisoformat(metadata['collects'][0]['startAtUTC'])
-         if parsed_date.year < 2024:
-             continue
-
-         collect = metadata['collects'][0]
-         gec_product = metadata['derivedProducts']['GEC'][0]
-         records.append({
-             'id': feature['id'],
-             'date': collect['startAtUTC'],
-             'bbox': feature['bbox'],
-             'geometry': feature['geometry'],
-             'satellite': metadata['umbraSatelliteName'],
-             'track': collect['satelliteTrack'],
-             'direction': collect['observationDirection'],
-             'mode': metadata['imagingMode'],
-             'band': collect['radarBand'],
-             'polarization': collect['polarizations'],
-             'azimuth_res': gec_product['groundResolution']['azimuthMeters'],
-             'range_res': gec_product['groundResolution']['rangeMeters'],
-             'rows': gec_product['numRows'],
-             'cols': gec_product['numColumns'],
-             'size': gec_product['numRows'] * gec_product['numColumns'],
-             'image_href': gec['href'],
-             'image': os.path.join(image_dir, '{name}.tiff'.format(name=feature['id']))
-         })
-
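-     # Download the images with a thread pool, leaving one CPU free.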
-     cpus = cpu_count()
-     results = ThreadPool(cpus - 1).imap_unordered(download_image, records)
-     for result in results:
-         print('url:', result)
-
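-     # Read back every image to record its CRS; unreadable files get 'None'.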
-     for record in records:
-         try:
-             with rio.open(record['image']) as src:
-                 record['crs'] = src.crs.to_proj4()
-         except Exception:
-             record['crs'] = 'None'
-             print('Error reading the image:', record['image'])
-
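-     # Persist the metadata table as a spreadsheet.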
-     df = pd.DataFrame.from_records(records)
-     df.to_excel('out.xlsx')
-
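-     # Tile the images that are in geographic WGS84 into 2048x2048 patches,
-     # one output folder per image id.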
-     selected_records = []
-     for record in records:
-         if record['crs'] == '+proj=longlat +datum=WGS84 +no_defs=True':
-             out_dir = 'dataset/tile/{id}'.format(id=record['id'])
-             os.makedirs(out_dir, exist_ok=True)
-             selected_records.append({'input_path': record['image'], 'out_dir': out_dir, 'patch_size': 2048})
-
-     cpus = cpu_count()
-     results = ThreadPool(cpus - 1).imap_unordered(get_tiles, selected_records)
-     for result in results:
-         print('tile:', result)
-
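-     # Upload the spreadsheet, the raw catalog, and the whole dataset folder
-     # (images and tiles) to the Hugging Face dataset repo.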
-     api.upload_file(
-         path_or_fileobj='out.xlsx',
-         path_in_repo='out.xlsx',
-         repo_id='fedric95/umbra',
-         repo_type='dataset',
-     )
-
-     api.upload_file(
-         path_or_fileobj='data.json',
-         path_in_repo='data.json',
-         repo_id='fedric95/umbra',
-         repo_type='dataset',
-     )
-
-     api.upload_large_folder(
-         repo_id='fedric95/umbra',
-         repo_type='dataset',
-         folder_path='./dataset/',
-     )