youl committed on
Commit
98e563e
1 Parent(s): 3c18e87

New application

Browse files
Files changed (5) hide show
  1. api.py +84 -0
  2. app.py +108 -0
  3. indices.py +157 -0
  4. processing.py +151 -0
  5. requirements.txt +13 -0
api.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sentinelsat import SentinelAPI, read_geojson, geojson_to_wkt
2
+ from datetime import datetime, timedelta,date
3
+ import zipfile
4
+ import rasterio
5
+ from rasterio.plot import show
6
+ from PIL import Image
7
+ import matplotlib.pyplot as plt
8
+ import numpy as np
9
+ import pandas as pd
10
+ import os
11
+ from glob import glob
12
+ from tqdm import tqdm
13
+ #from haversine import haversine, Unit
14
+ #from xml.etree import ElementTree as et
15
+ import xmltodict
16
+ import json
17
+ import warnings
18
+ import shutil
19
+ from shapely.geometry import Point
20
+ from shapely.geometry.polygon import Polygon
21
+ warnings.filterwarnings('ignore')
22
+
23
+ ##
24
def unzip():
    """Extract every .zip archive found in the current working directory."""
    for archive in glob('*.zip'):
        with zipfile.ZipFile(archive) as zf:
            zf.extractall()
29
+
30
+
31
+ ##
32
def select_best_cloud_coverage_tile():
    """Pick the extracted .SAFE tile with the lowest cloud coverage.

    Scans every ``*.SAFE`` folder in the cwd, reads the cloud coverage
    assessment from its ``MTD_MSIL2A.xml`` metadata, and selects the minimum.

    Returns
    -------
    tuple (name, cloud_coverage, days_ago):
        name of the best folder, its cloud coverage in percent, and the
        number of days between its acquisition date and now.

    Raises
    ------
    ValueError if no .SAFE folder is present (min() of empty sequence).
    """
    tile_names = {}
    cld_prob = []
    for fold in glob('*.SAFE'):
        metadata_path = fold + "/MTD_MSIL2A.xml"
        # fix: use a context manager so the metadata file handle is closed
        with open(metadata_path, "r") as xml_file:
            python_dict = xmltodict.parse(xml_file.read())
        cld = float(python_dict["n1:Level-2A_User_Product"]["n1:Quality_Indicators_Info"]["Cloud_Coverage_Assessment"])
        tile_names[cld] = fold
        cld_prob.append(cld)

    best_cld = min(cld_prob)
    name = tile_names[best_cld]

    # Folder names look like S2X_MSILzA_YYYYMMDDTHHMMSS_... — the third
    # underscore-separated field starts with the acquisition date.
    dates = name.split('_')[2][:8]
    acquisition_date = datetime.strptime(dates, "%Y%m%d")
    days_ago = (datetime.now() - acquisition_date).days
    return name, best_cld, days_ago
51
+
52
+ ##
53
def find_good_tile(df, point):
    """Return the name of the first tile whose footprint contains *point*.

    Parameters
    ----------
    df : pandas.DataFrame
        Must have a ``tiles`` column (tile name) and a ``coords`` column
        holding a stringified flat list of "lat,lon,lat,lon,..." values
        wrapped in brackets/quotes.
    point : shapely.geometry.Point
        Point to locate; must use the same (lat, lon) axis order as coords.

    Returns
    -------
    The matching tile name, or the legacy sentinel ``404`` (int) when no
    tile footprint contains the point (kept for callers that compare to 404).
    """
    for row in tqdm(df.itertuples(), total=len(df)):
        # fix: strip the junk characters in one pass and split only once
        # (the original re-split the string on every list element).
        raw = row.coords.translate(str.maketrans('', '', "[]'"))
        values = [float(v) for v in raw.split(",")]
        # Consecutive value pairs form the polygon vertices.
        polygon = Polygon(list(zip(values[0::2], values[1::2])))
        if polygon.contains(point):
            print(row.tiles)
            return row.tiles

    return 404
68
+
69
+ ##
70
def delete_tiles():
    """Remove downloaded .zip archives and extracted .SAFE / .tmp folders
    from the current working directory (missing folders are ignored)."""
    for archive in glob('*.zip'):
        os.remove(archive)
    for folder in glob('*.SAFE') + glob("*.tmp"):
        shutil.rmtree(folder, ignore_errors=True)
81
+
82
+
83
+
84
+
app.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from api import *
3
+ from processing import *
4
+ import pandas as pd
5
+ from indices import indices
6
+ import xgboost as xgb
7
+ import pickle
8
+ import json
9
+ import boto3
10
+ from shapely.geometry import MultiPolygon,shape
11
+ from shapely.geometry import Point
12
+ from shapely.geometry.polygon import Polygon
13
+ from glob import glob
14
+ import wget
15
+
16
+
17
def predict(lat, lon):
    """Estimate biomass, carbon stock and NDVI for a point in Ivory Coast.

    Downloads the Sentinel-2 tile covering (lat, lon), extracts band values
    around the point, derives vegetation indices and runs the trained model.

    Parameters
    ----------
    lat, lon : float
        WGS84 coordinates selected in the UI.

    Returns
    -------
    Tuple of five strings: cloud coverage, sensing age, biomass (Mg/ha),
    carbon (MgC/ha) and mean NDVI. On failure the first element carries the
    error message and the remaining four are empty strings.
    """
    cord = [lon, lat]
    lon = round(lon, 4)
    lat = round(lat, 4)
    x1 = [lon, lat]  # (lon, lat) — axis order of the GeoJSON rings
    x2 = [lat, lon]  # (lat, lon) — axis order used by data/frame.csv

    with open("data/CIV_0.json", "r") as file:
        data = json.load(file)
    # Merge the three outer rings into a single Ivory Coast polygon.
    features = [data['features'][0]['geometry']['coordinates'][0]
                + data['features'][0]['geometry']['coordinates'][1]
                + data['features'][0]['geometry']['coordinates'][2]]
    data['features'][0]['geometry']['coordinates'] = features
    ci_polygone = data['features'][0]['geometry']['coordinates'][0][0]
    point1 = Point(x1)
    point2 = Point(x2)
    polygon = Polygon(ci_polygone)

    if not polygon.contains(point1):
        return "Choisissez une zone de la CI", "", "", "", ""

    df = pd.read_csv("data/frame.csv")
    name = find_good_tile(df, point2)
    if name == 404:  # sentinel: no Sentinel-2 tile covers the point
        reponse = "Sentinel-2 ne dispose pas de données ce sur ce lieu à ce jour"
        return reponse, "", "", "", ""

    # Download and unpack the tile archive from the public S3 bucket.
    path = "https://data354-public-assets.s3.eu-west-3.amazonaws.com/cisentineldata/"
    url = path + name
    wget.download(url)
    unzip()
    name, cld_prob, days_ago = select_best_cloud_coverage_tile()
    bandes_path_10, bandes_path_20, bandes_path_60, tile_path, path_cld_20, path_cld_60 = paths(name)

    # Band values around the point, one call per native resolution.
    images_10 = extract_sub_image(bandes_path_10, tile_path, cord)
    images_20 = extract_sub_image(bandes_path_20, tile_path, cord, 20, 1)
    images_60 = extract_sub_image(bandes_path_60, tile_path, cord, 60)

    # Assemble a single-row frame in the band order the model was trained on.
    feature = images_10.tolist() + images_20.tolist() + images_60.tolist()
    bands = ['B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B11', 'B12', 'B01', 'B09']
    X = pd.DataFrame([feature], columns=bands)
    # Vegetation index calculation.
    X = indices(X)

    # Load the trained model from disk.
    # fix: close the file handle (original leaked it via pickle.load(open(...))).
    # NOTE(review): pickle on a bundled model file is fine; never use it on
    # untrusted input.
    filename = "data/finalized_model.sav"
    with open(filename, 'rb') as model_file:
        loaded_model = pickle.load(model_file)
    biomass = loaded_model.predict(X)[0]
    carbon = 0.55 * biomass  # fixed biomass→carbon conversion factor

    # NDVI around the point.
    ndvi_index = ndvi(cord, name)

    # Delete downloaded archives and extracted folders.
    delete_tiles()

    return (str(cld_prob) + " % cloud coverage",
            str(days_ago) + " days ago",
            str(biomass) + " Mg/ha",
            str(carbon) + " MgC/ha",
            "NDVI: " + str(ndvi_index))
80
+
81
+ # Create title, description and article strings
82
# UI copy displayed on the Gradio page.
title = "🌴BEEPAS : Biomass estimation to Evaluate the Environmental Performance of Agroforestry Systems🌴"
description = "This application estimates the biomass of certain areas using AI and satellite images (S2)."
article = "Created by data354."

# Example (lat, lon) pairs inside Ivory Coast.
example_list = [[5.379913, -4.050445], [6.54644, -7.86156], [5.346938, -4.027849]]

# One textbox per value returned by predict(), in the same order.
_output_labels = [
    "Cloud coverage",
    "Number of days since sensing",
    "Above ground biomass density(AGBD) Mg/ha",
    "Carbon stock density MgC/ha ",
    "Mean NDVI",
]
outputs = [gr.Textbox(label=label) for label in _output_labels]

demo = gr.Interface(
    fn=predict,
    inputs=["number", "number"],
    outputs=outputs,
    examples=example_list,
    title=title,
    description=description,
    article=article,
)

demo.launch(share=True)
indices.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
def indices(X):
    """Add vegetation / water / soil spectral indices as new columns of *X*.

    Parameters
    ----------
    X : pandas.DataFrame
        One row per sample with Sentinel-2 band columns
        'B01','B02','B03','B04','B05','B06','B07','B08','B8A','B09','B11','B12'.

    Returns
    -------
    The same DataFrame (mutated in place) with the index columns appended.

    Notes
    -----
    fix: the original assigned MTCI, TCARI and NDWI2 twice; the first
    assignments were dead stores silently overwritten below, so they were
    removed (final values are unchanged).
    """
    X["AWEInsh"] = 4.0*(X["B03"]-X["B11"])-0.25*X["B08"]+2.75*X["B12"]
    X["NBSIMS"] = 0.36*(X["B03"]+X["B04"]+X["B08"]) - (((X["B02"]+X["B12"])/X["B03"])+X["B11"])
    X["MuWIR"] = -4.0*((X["B02"]-X["B03"])/(X["B02"]+X["B03"]))+2.0*((X["B03"]-X["B08"])/(X["B03"]+X["B08"]))+2.0*((X["B03"]-X["B12"])/(X["B03"]+X["B12"]))-((X["B03"]-X["B11"])/(X["B03"]+X["B11"]))
    X["VARI700"] = (X["B05"]-1.7*X["B04"]+0.7*X["B02"])/(X["B05"]+1.3*X["B04"]-1.3*X["B02"])

    X["S2WI"] = (X["B05"]-X["B12"])/(X["B05"]+X["B12"])
    X["NBAI"] = ((X["B12"]-X["B11"])/X["B03"])/((X["B12"]+X["B11"])/X["B03"])
    X["WI2015"] = 1.7204+171*X["B03"]+3*X["B04"]-70*X["B08"]-45*X["B11"]-71*X["B12"]
    X["BAIM"] = 1.0/((0.05-X["B08"])**2.0)+((0.2-X["B12"])**2.0)
    X["NDDI"] = (( (X["B08"]-X["B04"])/(X["B08"]+X["B04"])-((X["B03"]-X["B08"])/(X["B03"]+X["B08"])))/((X["B08"]-X["B04"])/(X["B08"]+X["B04"]))+((X["B03"]-X["B08"])/(X["B03"]+X["B08"])))
    X["BCC"] = X["B02"]/(X["B04"]+X["B03"]+X["B02"])
    X["RCC"] = X["B04"]/(X["B04"]+X["B03"]+X["B02"])
    X["IKAW"] = (X["B08"]-X["B02"])/(X["B08"]+X["B02"])
    X["ARI"] = (1/X["B03"])-(1/X["B05"])
    X["MIRBI"] = 10.0*X["B12"]-9.8*X["B11"]+2.0
    X["NMDI2"] = (X["B08"]-(X["B11"]-X["B12"]))/(X["B08"]+(X["B11"]-X["B12"]))
    X["TTVI"] = 0.5*((865.0-740.0)*(X["B8A"]-X["B06"]-(X["B07"]-X["B06"])*(783.0-740)))
    X["NHFD"] = (X["B05"]-X["B01"])/(X["B05"]+X["B01"])
    X["NDSWIR"] = (X["B11"]-X["B8A"])/(X["B11"]+X["B8A"])
    X["NBRSWIR"] = (X["B12"]-X["B11"]-0.02)/(X["B12"]+X["B11"]+0.1)
    X["NBR"] = (X["B12"]-X["B8A"])/(X["B12"]+X["B8A"])
    X["NBRplus"] = (X["B12"]-X["B8A"]-X["B03"]-X["B02"])/(X["B12"]+X["B8A"]+X["B03"]+X["B02"])

    # NOTE(review): the original also computed NDWI2 from B02/B08 and then
    # immediately overwrote it with this B01 variant; one of the two formulas
    # (and likely a distinct column name) was probably intended — confirm.
    X["NDWI2"] = (X["B01"]-X["B08"])/(X["B01"]+X["B08"])

    # Red-edge position / chlorophyll indices.
    X["S2REP"] = 705 + 35 * ((((X["B07"] + X["B04"])/2) - X["B05"])/(X["B06"] - X["B05"]))
    X["CCCI"] = ((X["B08"] - X["B05"]) / (X["B08"] + X["B05"])) / ((X["B08"] - X["B04"]) / (X["B08"] + X["B04"]))
    X["MCARI"] = ((X["B05"] - X["B04"]) - 2 * (X["B05"] - X["B03"])) * (X["B05"] / X["B04"])
    X["TCARI"] = 3 * ((X["B05"] - X["B04"]) - 0.2 * (X["B05"] - X["B03"]) * (X["B05"] / X["B04"]))
    X["PVI"] = (X["B08"] - 0.3 * X["B04"] - 0.5) / ((1 + 0.3 * 2) ** (1/2.0))
    X["ndvi"] = (X["B08"] - X["B04"]) / (X["B08"] + X["B04"])
    X["evi"] = 2.5 * (X["B08"] - X["B04"]) / (X["B08"] + 6 * X["B04"] - 7.5 * X["B02"] + 1)
    X["savi"] = (X["B08"] - X["B04"]) / (X["B08"] + X["B04"] + 0.5)
    X["mndwi"] = (X["B03"] - X["B08"]) / (X["B03"] + X["B08"])
    X["ARVI"] = (X["B08"] - (2 * X["B04"]) + X["B02"]) / (X["B08"] + (2 * X["B04"]) + X["B02"])
    X["SIPI"] = (X["B08"] - X["B02"]) / (X["B08"] - X["B04"])
    X["RENDVI"] = (X["B06"] - X["B05"]) / (X["B06"] + X["B05"])
    X["MRESR"] = (X["B06"] - X["B01"]) / (X["B05"] - X["B01"])

    # CANOLA
    X["RYI"] = X["B03"] / X["B02"]
    X["NDYI"] = (X["B03"] - X["B02"]) / (X["B03"] + X["B02"])
    X["DYI"] = X["B03"] - X["B02"]
    X["ACI"] = X["B08"] * (X["B04"] + X["B03"])

    # WEED
    X["CVI"] = (X["B08"] / X["B03"]) * (X["B04"] / X["B03"])
    X["AVI"] = (X["B08"] * (1 - X["B04"]) * (X["B08"] - X["B04"]))
    X["SI"] = ((1 - X["B02"]) * (1 - X["B03"]) * (1 - X["B04"]))
    X["BSI"] = ((X["B11"] + X["B04"]) - (X["B08"] + X["B02"])) / ((X["B11"] + X["B04"]) + (X["B08"] + X["B02"]))

    # WINE GRAPES
    X["MTCI"] = (X["B06"] - X["B05"])/(X["B05"] - X["B04"])
    X["NPCRI"] = (X["B04"] - X["B02"]) / (X["B04"] + X["B02"])

    # ROOIBOS
    X["BAI"] = 1/((0.1 - X["B04"]) ** 2 + (0.06 - X["B08"]) ** 2)

    # Kept as a Python-level loop: float ** 0.5 on a negative base yields a
    # complex number, which numpy's sqrt would turn into nan instead.
    MTVI2 = list(1.5*(1.2 * (i - j) - 2.5 * (k - j))* ((2 * i + 1)**2-(6 * i - 5 * k ** (1/2.0)) - 0.5)**(1/2.0) for i, j, k in zip(X["B08"], X["B03"], X["B04"]))
    X["MTVI2"] = np.array(MTVI2)

    X["NDSI"] = (X["B03"] - X["B11"]) / (X["B03"] + X["B11"])

    # DRYNESS / DROUGHT
    X["NDMI"] = (X["B08"] - X["B11"])/(X["B08"] + X["B11"])
    TNDVI = [(x)**(1/2.0) for x in ((X["B08"] - X["B04"]) / (X["B08"] + X["B04"]) + 0.5)]
    X["TNDVI"] = np.array(TNDVI)

    # GENERAL
    X["TVI"] = (120 * (X["B06"] - X["B03"]) - 200 * (X["B04"] - X["B03"])) / 2
    X["EXG"] = 2 * X["B03"] - X["B04"] - X["B02"]
    X["PSRI"] = (X["B04"] - X["B02"]) / X["B06"]

    return X
processing.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from PIL import Image
3
+ import matplotlib.pyplot as plt
4
+ import numpy as np
5
+ import pandas as pd
6
+ from glob import glob
7
+ import os
8
+ import utm
9
+ import rasterio
10
+ from tqdm import tqdm
11
+ #from xml.etree import ElementTree as et
12
+ import xmltodict
13
+
14
+ ##
15
def cloud_masking(image,cld):
    """Replace cloudy pixels by the pre-masking image mean.

    A pixel is considered cloudy where its cloud probability *cld* exceeds 30.
    Note: *image* is mutated in place and also returned.
    """
    mean_value = image.mean()  # mean over the original, un-masked values
    image[cld > 30] = mean_value
    return image
20
+
21
+ ##
22
def load_file(fp):
    """Takes a PosixPath object or string filepath
    and returns np array"""
    return np.array(Image.open(str(fp)))
27
+
28
def paths (name):
    """Build band and metadata paths inside an extracted .SAFE tile folder.

    Parameters
    ----------
    name : str
        Path of the .SAFE folder (relative to the cwd).

    Returns
    -------
    tuple: (bandes_path_10, bandes_path_20, bandes_path_60, tile_path,
    path_cld_20, path_cld_60) — the jp2 band paths at each resolution, the
    INSPIRE.xml metadata path and the 20 m / 60 m cloud-probability masks.

    Raises
    ------
    IndexError if the expected .SAFE layout is missing (a glob matches nothing).
    """
    fold_band_10 = glob(name+"/GRANULE/*/IMG_DATA/R10m")[0]
    fold_band_20 = glob(name+"/GRANULE/*/IMG_DATA/R20m")[0]
    fold_band_60 = glob(name+"/GRANULE/*/IMG_DATA/R60m")[0]

    # Derive the "<tile>_<date>" filename prefix from any 10 m jp2 file.
    # fix: use os.path.basename instead of split("/")[-1] (portable across
    # path separators).
    sample = glob(name+"/GRANULE/*/IMG_DATA/R10m"+"/*.jp2")[0]
    lists = os.path.basename(sample).split("_")
    fixe = lists[0]+'_'+lists[1]

    band_10 = ['B02', 'B03', 'B04','B08']
    band_20 = ['B05', 'B06', 'B07','B8A','B11', 'B12']
    band_60 = ['B01','B09']
    images_name_10m = [fixe+"_"+band+"_10m.jp2" for band in band_10 ]
    images_name_20m = [fixe+"_"+band+"_20m.jp2" for band in band_20 ]
    images_name_60m = [fixe+"_"+band+"_60m.jp2" for band in band_60 ]

    bandes_path_10 = [os.path.join(fold_band_10,img) for img in images_name_10m]
    bandes_path_20 = [os.path.join(fold_band_20,img) for img in images_name_20m]
    bandes_path_60 = [os.path.join(fold_band_60,img) for img in images_name_60m]

    tile_path = name+"/INSPIRE.xml"
    path_cld_20 = glob(name+"/GRANULE/*/QI_DATA/MSK_CLDPRB_20m.jp2")[0]
    path_cld_60 = glob(name+"/GRANULE/*/QI_DATA/MSK_CLDPRB_60m.jp2")[0]

    return bandes_path_10,bandes_path_20,bandes_path_60,tile_path,path_cld_20,path_cld_60
54
+
55
+ ##
56
def coords_to_pixels(ref, utm, m=10):
    """Convert UTM coordinates *utm* to (x, y) pixel indices.

    ref : (easting, northing) of the tile's top-left corner.
    utm : (easting, northing) of the target point.
    m   : ground resolution in metres per pixel.
    """
    dx = utm[0] - ref[0]          # eastward offset in metres
    dy = ref[1] - utm[1]          # image rows grow southward
    return int(dx / m), int(dy / m)
63
+
64
+ ##
65
def extract_sub_image(bandes_path,tile_path,area,resolution=10, d= 3, cld_path = None):
    """Extract per-band values in a window around *area*.

    Parameters
    ----------
    bandes_path : list of jp2 band image paths (one per band).
    tile_path : path to the tile's INSPIRE.xml metadata.
    area : [lon, lat] of the point of interest.
    resolution : ground resolution in metres (10, 20 or 60).
    d : half-size of the extraction window in pixels.
    cld_path : optional cloud-probability image; cloudy pixels are masked.

    Returns
    -------
    np.ndarray: one pixel value per band when resolution == 60, otherwise
    the mean of the (2d, 2d) window per band.
    """
    # The tile corner (lat, lon) is stored in the abstract of the INSPIRE
    # metadata. fix: use a context manager so the file handle is closed.
    with open(tile_path,"r") as xml_file:
        python_dict = xmltodict.parse(xml_file.read())
    tile_coordonnates = python_dict["gmd:MD_Metadata"]["gmd:identificationInfo"]["gmd:MD_DataIdentification"]["gmd:abstract"]["gco:CharacterString"].split()

    # S2 tile corner coordinates.
    lat,lon = float(tile_coordonnates[0]),float(tile_coordonnates[1])

    refx, refy, _, _ = utm.from_latlon(lat, lon)
    ax,ay,_,_ = utm.from_latlon(area[1],area[0])  # area is (lon, lat)

    x,y = coords_to_pixels([refx, refy],[ax,ay],resolution)

    images = []
    for band_path in tqdm(bandes_path, total=len(bandes_path)):
        image = load_file(band_path).astype(np.float32)
        if resolution==60:
            images.append(image[y,x])                  # single pixel at 60 m
        else:
            images.append(image[y-d:y+d,x-d:x+d])      # (2d, 2d) window
    images = np.array(images)

    # Mask cloudy pixels when a cloud-probability layer is supplied.
    if cld_path is not None:
        cld_mask = load_file(cld_path).astype(np.float32)
        cld = cld_mask[y-d:y+d,x-d:x+d]
        images = cloud_masking(images,cld)

    if resolution==60:
        return images
    return images.mean((1,2))
109
+
110
+
111
def ndvi(area, tile_name):
    """Mean NDVI of a 6x6-pixel window around *area*.

    Parameters
    ----------
    area : (lon, lat) of the point of interest.
    tile_name : name of the tile with the lowest cloud coverage.

    Returns
    -------
    Mean of (NIR - red) / (NIR + red) over the window (float16 precision).
    """
    # Extract the tile corner coordinates (lat, lon) from INSPIRE.xml.
    # fix: use a context manager so the file handle is closed.
    tile_path = tile_name+"/INSPIRE.xml"
    with open(tile_path,"r") as xml_file:
        python_dict = xmltodict.parse(xml_file.read())
    tile_coordonnates = python_dict["gmd:MD_Metadata"]["gmd:identificationInfo"]["gmd:MD_DataIdentification"]["gmd:abstract"]["gco:CharacterString"].split()

    # S2 tile corner coordinates.
    lat,lon = float(tile_coordonnates[0]),float(tile_coordonnates[1])

    refx, refy, _, _ = utm.from_latlon(lat, lon)
    ax,ay,_,_ = utm.from_latlon(area[1],area[0])  # area is (lon, lat)
    x,y = coords_to_pixels([refx, refy],[ax,ay])

    # Read the red (B04) and NIR (B08) 10 m bands.
    # fix: close the rasterio datasets with context managers.
    path_4 = tile_name+"/GRANULE/*/IMG_DATA/R10m/*_B04_10m.jp2"
    path_8 = tile_name+"/GRANULE/*/IMG_DATA/R10m/*_B08_10m.jp2"
    with rasterio.open(glob(path_4)[0]) as red_object:
        red = red_object.read()[0]
    with rasterio.open(glob(path_8)[0]) as nir_object:
        nir = nir_object.read()[0]

    # Extract the window and cast away the unsigned integer dtype so the
    # subtraction below cannot wrap around.
    sub_red = red[y-3:y+3,x-3:x+3].astype(np.float16)
    sub_nir = nir[y-3:y+3,x-3:x+3].astype(np.float16)

    ndvi_image = ((sub_nir - sub_red)/(sub_nir+sub_red))
    return ndvi_image.mean()
151
+
requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DateTime==5.2
2
+ geojson==3.0.1
3
+ matplotlib==3.8.0
4
+ numpy==1.26.0
5
+ pandas==2.1.1
6
+ Pillow==10.0.1
7
+ scipy==1.11.2
8
+ sentinelsat==1.2.1
9
+ tqdm==4.66.1
10
+ utm==0.7.0
11
+ xgboost==2.0.0
12
+ xmltodict==0.13.0
13
+ rasterio