Commit
·
b3e6969
1
Parent(s):
479026e
Update app.py
Browse files
app.py
CHANGED
|
@@ -8,9 +8,9 @@ import matplotlib.colors as mcolors
|
|
| 8 |
# CHARGEMENT DES DONNEES
|
| 9 |
|
| 10 |
def load_data_antenna():
|
| 11 |
-
df_antenna = pd.read_csv(r'data/df_clean.csv', engine="pyarrow", dtype_backend="pyarrow")
|
| 12 |
df_antenna['DATE'] = df_antenna['DATE'].astype('datetime64[ns]')
|
| 13 |
-
df_antenna['HEURE'] = pd.to_datetime(df_antenna['HEURE'], format='%H:%M:%S').dt.time
|
| 14 |
df_antenna['ANNEE'] = df_antenna['ANNEE'].astype('int')
|
| 15 |
df_antenna['COMMUNE'] = df_antenna['COMMUNE'].astype('str')
|
| 16 |
df_antenna['LIEU_DIT'] = df_antenna['LIEU_DIT'].astype('str')
|
|
@@ -49,11 +49,11 @@ FEDER = "images/FEDER-NA.png"
|
|
| 49 |
PREFET = "images/Prefet_NA.jpg"
|
| 50 |
VERT = "images/FranceNationVerte.jpg"
|
| 51 |
|
| 52 |
-
with open("pages/home.md", "r", encoding="utf-8") as file:
|
| 53 |
root_md = file.read()
|
| 54 |
|
| 55 |
## PAGE DE PRESENTATION
|
| 56 |
-
with open("pages/page1.md", "r", encoding="utf-8") as file:
|
| 57 |
page1 = file.read()
|
| 58 |
|
| 59 |
## VISUALISATION DES DONNEES D'ANTENNES
|
|
@@ -63,87 +63,87 @@ genders = sorted(df_antenna['SEXE'].unique().tolist())
|
|
| 63 |
sites = sorted(df_antenna['LIEU_DIT'].unique().tolist())
|
| 64 |
dates = [df_antenna['DATE'].min(), df_antenna['DATE'].max()]
|
| 65 |
|
| 66 |
-
def create_trajectories_map(df, width=1200, height=1000):
|
| 67 |
if df.empty:
|
| 68 |
fig = go.Figure(go.Scattermapbox())
|
| 69 |
-
fig.update_layout(mapbox_style="carto-positron", mapbox_zoom=6, mapbox_center={"lat": 46.493889, "lon": 2.602778}, height=height, width=width)
|
| 70 |
return fig
|
| 71 |
|
| 72 |
-
|
| 73 |
-
center_lat, center_lon = df['LAT_WGS'].mean(), df['LONG_WGS'].mean()
|
| 74 |
|
| 75 |
fig = go.Figure()
|
| 76 |
|
| 77 |
# Extraction des liaisons uniques entre sites
|
| 78 |
-
unique_connections = df.sort_values('DATE').drop_duplicates(subset=['LAT_WGS', 'LONG_WGS'], keep='first')
|
| 79 |
|
| 80 |
# Tracer les liaisons
|
| 81 |
last_point = None
|
| 82 |
for _, row in unique_connections.iterrows():
|
| 83 |
if last_point is not None:
|
| 84 |
fig.add_trace(go.Scattermapbox(
|
| 85 |
-
lat=[last_point['LAT_WGS'], row['LAT_WGS']],
|
| 86 |
-
lon=[last_point['LONG_WGS'], row['LONG_WGS']],
|
| 87 |
-
mode='lines',
|
| 88 |
-
line=dict(color='violet', width=2),
|
| 89 |
-
showlegend=False
|
| 90 |
))
|
| 91 |
last_point = row
|
| 92 |
|
| 93 |
-
#
|
| 94 |
-
sites = df.drop_duplicates(subset=['LAT_WGS', 'LONG_WGS', 'LIEU_DIT'])
|
| 95 |
for _, site in sites.iterrows():
|
| 96 |
fig.add_trace(go.Scattermapbox(
|
| 97 |
-
lat=[site['LAT_WGS']],
|
| 98 |
-
lon=[site['LONG_WGS']],
|
| 99 |
-
mode='markers+text',
|
| 100 |
-
marker=go.scattermapbox.Marker(size=9, color='blue'),
|
| 101 |
-
textfont=dict(color='black'), #
|
| 102 |
-
text=site['LIEU_DIT'],
|
| 103 |
-
textposition="bottom right",
|
| 104 |
-
name=site['LIEU_DIT'],
|
| 105 |
-
showlegend=False
|
| 106 |
))
|
| 107 |
|
| 108 |
fig.update_layout(
|
| 109 |
-
mapbox_style="carto-positron",
|
| 110 |
-
mapbox_zoom=6,
|
| 111 |
-
mapbox_center={"lat": center_lat, "lon": center_lon},
|
| 112 |
-
height=height,
|
| 113 |
-
width=width
|
| 114 |
)
|
| 115 |
return fig
|
| 116 |
|
| 117 |
-
def create_transition_matrix(df, remove_self_loops=True, reduce_self_loops=False, reduction_factor=0.5):
|
| 118 |
-
#
|
| 119 |
-
df = df.sort_values(by=['NUM_PIT', 'DATE'])
|
| 120 |
|
| 121 |
# Créer une colonne pour le lieu précédent
|
| 122 |
df['LIEU_PRECEDENT'] = df.groupby('NUM_PIT')['LIEU_DIT'].shift()
|
| 123 |
|
| 124 |
# Filtrer pour obtenir seulement les transitions valides (non nulles)
|
| 125 |
-
df_transitions = df.dropna(subset=['LIEU_PRECEDENT'])
|
| 126 |
|
| 127 |
# Compter les transitions de chaque lieu vers un autre
|
| 128 |
-
transition_counts = df_transitions.groupby(['LIEU_PRECEDENT', 'LIEU_DIT']).size().reset_index(name='count')
|
| 129 |
|
| 130 |
# Retirer les transitions où source == target si demandé
|
| 131 |
if remove_self_loops:
|
| 132 |
transition_counts = transition_counts[transition_counts['LIEU_PRECEDENT'] != transition_counts['LIEU_DIT']]
|
| 133 |
|
| 134 |
-
# Réduire le poids des transitions de recontrôle si demandé
|
| 135 |
if reduce_self_loops:
|
| 136 |
transition_counts.loc[transition_counts['LIEU_PRECEDENT'] == transition_counts['LIEU_DIT'], 'count'] *= reduction_factor
|
| 137 |
|
| 138 |
# Construire une matrice de transition
|
| 139 |
lieux = sorted(set(df['LIEU_DIT'].unique()) | set(df['LIEU_PRECEDENT'].dropna().unique()))
|
| 140 |
-
transition_matrix = pd.DataFrame(0, index=lieux, columns=lieux)
|
| 141 |
|
| 142 |
for _, row in transition_counts.iterrows():
|
| 143 |
transition_matrix.at[row['LIEU_PRECEDENT'], row['LIEU_DIT']] = row['count']
|
| 144 |
|
| 145 |
return transition_matrix, lieux
|
| 146 |
-
|
|
|
|
| 147 |
# Transformer la matrice en table de connexions
|
| 148 |
transition_table = transition_matrix.stack().reset_index()
|
| 149 |
transition_table.columns = ['source', 'target', 'count']
|
|
@@ -161,26 +161,23 @@ def process_transition_matrix(transition_matrix, df, threshold = 9):
|
|
| 161 |
# Séparer les paires de sites dans des colonnes distinctes
|
| 162 |
transition_table[['source', 'target']] = pd.DataFrame(transition_table['site_pair'].tolist(), index=transition_table.index)
|
| 163 |
|
| 164 |
-
# Filtrer pour ne garder que les connexions avec au moins
|
| 165 |
-
transition_table = transition_table[transition_table['count'] > threshold]
|
| 166 |
|
| 167 |
# Obtenir les limites pour normaliser les valeurs de count
|
| 168 |
-
min_count = transition_table['count'].min()
|
| 169 |
-
max_count = transition_table['count'].max()
|
| 170 |
mean_count = transition_table['count'].mean()
|
| 171 |
std_count = transition_table['count'].std()
|
| 172 |
|
| 173 |
# Normaliser les valeurs de count entre 0 et 1
|
| 174 |
-
|
| 175 |
-
transition_table['normalized_count'] = (transition_table['count'] - mean_count) / std_count # Centrer-normer
|
| 176 |
|
| 177 |
-
#
|
| 178 |
cmap = mcolors.LinearSegmentedColormap.from_list(
|
| 179 |
"CustomGreenYellowOrangeRed",
|
| 180 |
["#808080", "#4B4B4B", "#FC4C02", "#FF8C00", "#FF0000"]
|
| 181 |
)
|
| 182 |
|
| 183 |
-
# Calculer les couleurs basées sur les counts
|
| 184 |
transition_table['color'] = transition_table['normalized_count'].apply(lambda x: mcolors.to_hex(cmap(x)))
|
| 185 |
|
| 186 |
# Fusionner les coordonnées du df dans transition_table pour source et target
|
|
@@ -189,50 +186,51 @@ def process_transition_matrix(transition_matrix, df, threshold = 9):
|
|
| 189 |
|
| 190 |
transition_table = transition_table.merge(coords, left_on='source', right_index=True)
|
| 191 |
transition_table = transition_table.merge(coords, left_on='target', right_index=True, suffixes=('_source', '_target'))
|
| 192 |
-
|
| 193 |
return transition_table
|
| 194 |
-
|
|
|
|
| 195 |
fig = go.Figure()
|
| 196 |
|
| 197 |
# Tracer chaque connexion individuellement avec sa couleur
|
| 198 |
for _, row in transition_table.iterrows():
|
| 199 |
fig.add_trace(go.Scattermapbox(
|
| 200 |
-
lat=[row['LAT_WGS_source'], row['LAT_WGS_target']],
|
| 201 |
-
lon=[row['LONG_WGS_source'], row['LONG_WGS_target']],
|
| 202 |
-
mode='lines',
|
| 203 |
-
line=dict(width=1, color=row['color']), # Utiliser la couleur calculée
|
| 204 |
-
hovertext=f"Count: {row['count']}",
|
| 205 |
-
hoverinfo='text',
|
| 206 |
-
showlegend=False
|
| 207 |
))
|
| 208 |
|
| 209 |
# Ajouter tous les marqueurs pour les lieux d'un seul coup
|
| 210 |
coords = df[['LIEU_DIT', 'LAT_WGS', 'LONG_WGS']].drop_duplicates()
|
| 211 |
for _, coord in coords.iterrows():
|
| 212 |
fig.add_trace(go.Scattermapbox(
|
| 213 |
-
lat=[coord['LAT_WGS']],
|
| 214 |
-
lon=[coord['LONG_WGS']],
|
| 215 |
-
mode='markers+text',
|
| 216 |
-
marker=go.scattermapbox.Marker(
|
| 217 |
-
size=6,
|
| 218 |
-
color='#8467D7', # Violet clair
|
| 219 |
),
|
| 220 |
-
textfont=dict(color='grey'),
|
| 221 |
-
text=[coord['LIEU_DIT']],
|
| 222 |
-
textposition="bottom right",
|
| 223 |
-
showlegend=False
|
| 224 |
))
|
| 225 |
|
| 226 |
# Définir le centre de la carte
|
| 227 |
center_lat, center_lon = df['LAT_WGS'].mean(), df['LONG_WGS'].mean()
|
| 228 |
|
| 229 |
fig.update_layout(
|
| 230 |
-
mapbox_style="carto-positron", # Utiliser le style de carte noir et blanc
|
| 231 |
-
mapbox_zoom=6,
|
| 232 |
-
mapbox_center={"lat": center_lat, "lon": center_lon},
|
| 233 |
-
font=dict(color='black'),
|
| 234 |
-
height=height,
|
| 235 |
-
width=width
|
| 236 |
)
|
| 237 |
|
| 238 |
return fig
|
|
@@ -243,45 +241,39 @@ create_trajectories_map_from_matrix(df_antenna, transition_table)
|
|
| 243 |
|
| 244 |
map_section = create_trajectories_map(df_empty)
|
| 245 |
|
| 246 |
-
with open("pages/page2.md", "r", encoding="utf-8") as file:
|
| 247 |
page2 = file.read()
|
| 248 |
|
| 249 |
def refresh_button(state):
|
| 250 |
df_filtered = df_antenna.copy()
|
| 251 |
|
| 252 |
-
#
|
| 253 |
-
print("Selected departments:", state.selected_dpt)
|
| 254 |
-
print("Selected species:", state.selected_sp)
|
| 255 |
-
print("Selected sites:", state.selected_sites)
|
| 256 |
-
print("Selected dates:", state.selected_dates)
|
| 257 |
-
|
| 258 |
-
# Filter by department
|
| 259 |
if state.selected_dpt:
|
| 260 |
equipped_pit = df_filtered[(df_filtered['ACTION'] == 'T') &
|
| 261 |
(df_filtered['DEPARTEMENT'].isin(state.selected_dpt))]['NUM_PIT'].unique()
|
| 262 |
df_filtered = df_filtered[df_filtered['NUM_PIT'].isin(equipped_pit)]
|
| 263 |
|
| 264 |
-
#
|
| 265 |
if state.selected_sp:
|
| 266 |
df_filtered = df_filtered[df_filtered['CODE_ESP'].isin(state.selected_sp)]
|
| 267 |
|
| 268 |
-
#
|
| 269 |
if state.selected_gender:
|
| 270 |
df_filtered = df_filtered[df_filtered['SEXE'].isin(state.selected_gender)]
|
| 271 |
|
| 272 |
-
#
|
| 273 |
if state.selected_sites:
|
| 274 |
site_pit = df_filtered[(df_filtered['ACTION'] == 'T') &
|
| 275 |
(df_filtered['LIEU_DIT'].isin(state.selected_sites))]['NUM_PIT'].unique()
|
| 276 |
df_filtered = df_filtered[df_filtered['NUM_PIT'].isin(site_pit)]
|
| 277 |
|
| 278 |
-
#
|
| 279 |
if state.selected_dates and len(state.selected_dates) == 2:
|
| 280 |
start_date = pd.Timestamp(state.selected_dates[0])
|
| 281 |
end_date = pd.Timestamp(state.selected_dates[1])
|
| 282 |
df_filtered = df_filtered[(df_filtered['DATE'] >= start_date) & (df_filtered['DATE'] <= end_date)]
|
| 283 |
|
| 284 |
-
#
|
| 285 |
transition_matrix_filtered, labels_filtered = create_transition_matrix(df_filtered)
|
| 286 |
transition_table_filtered = process_transition_matrix(transition_matrix_filtered, df_filtered)
|
| 287 |
state.map_section = create_trajectories_map_from_matrix(df_filtered, transition_table_filtered)
|
|
@@ -289,7 +281,7 @@ def refresh_button(state):
|
|
| 289 |
## PHENOLOGIES DES SITES
|
| 290 |
|
| 291 |
def gant_diagram(df_original):
|
| 292 |
-
# Créer une copie du DataFrame original
|
| 293 |
df_filtered = df_original[df_original['ACTION'] == "C"].copy()
|
| 294 |
|
| 295 |
# Convertir la colonne DATE en datetime et localiser en UTC
|
|
@@ -297,237 +289,242 @@ def gant_diagram(df_original):
|
|
| 297 |
df_filtered['DATE'] = df_filtered['DATE'].dt.tz_localize('UTC')
|
| 298 |
|
| 299 |
# Trier les valeurs par DATE
|
| 300 |
-
df_filtered = df_filtered.sort_values(by='DATE', ascending=True)
|
| 301 |
|
| 302 |
# Ajouter des colonnes pour les visites précédentes et les nouvelles visites
|
| 303 |
df_filtered['Prev_DATE'] = df_filtered.groupby(['LIEU_DIT'])['DATE'].shift(1)
|
| 304 |
df_filtered['New_Visit'] = (df_filtered['DATE'] - df_filtered['Prev_DATE']).dt.days > 1
|
| 305 |
-
df_filtered['New_Visit'].fillna(True, inplace=True)
|
| 306 |
df_filtered['Visit_ID'] = df_filtered.groupby(['LIEU_DIT'])['New_Visit'].cumsum()
|
| 307 |
|
| 308 |
# Supprimer les LIEU_DIT qui n'ont aucune visite détectée
|
| 309 |
-
df_filtered = df_filtered.dropna(subset=['Visit_ID'])
|
| 310 |
|
| 311 |
# Calculer min/max dates pour chaque visite
|
| 312 |
result = df_filtered.groupby(['LIEU_DIT', 'Visit_ID']).agg(
|
| 313 |
-
Start=pd.NamedAgg(column='DATE', aggfunc='min'),
|
| 314 |
-
Finish=pd.NamedAgg(column='DATE', aggfunc='max'),
|
| 315 |
-
Departement=pd.NamedAgg(column='DEPARTEMENT', aggfunc='first')
|
| 316 |
).reset_index()
|
| 317 |
|
| 318 |
# Supprimer les LIEU_DIT qui n'ont pas de dates valides
|
| 319 |
-
result = result.dropna(subset=['Start', 'Finish'])
|
| 320 |
|
| 321 |
# Filtrer les LIEU_DIT qui ont des résultats
|
| 322 |
result = result[result['Finish'] > result['Start']]
|
| 323 |
|
| 324 |
# Trier les sites par ordre alphabétique et les départements pour la légende
|
| 325 |
-
result = result.sort_values(by=['Departement', 'LIEU_DIT'])
|
| 326 |
-
|
| 327 |
-
# Assurez-vous que les colonnes de date sont au format datetime si ce n'est pas déjà le cas
|
| 328 |
result['Start'] = pd.to_datetime(result['Start'])
|
| 329 |
result['Finish'] = pd.to_datetime(result['Finish'])
|
| 330 |
|
| 331 |
# Déterminer la hauteur de la figure en fonction du nombre de sites
|
| 332 |
num_sites = result['LIEU_DIT'].nunique()
|
| 333 |
-
height_per_site = 50
|
| 334 |
-
base_height = 100
|
| 335 |
-
max_height = 20000
|
| 336 |
fig_height = min(base_height + num_sites * height_per_site, max_height)
|
| 337 |
|
| 338 |
# Créer le diagramme de Gantt
|
| 339 |
fig = px.timeline(
|
| 340 |
result,
|
| 341 |
-
x_start='Start',
|
| 342 |
-
x_end='Finish',
|
| 343 |
-
y='LIEU_DIT',
|
| 344 |
-
color='Departement', # Changer la couleur en fonction du département
|
| 345 |
-
color_discrete_sequence=px.colors.qualitative.Plotly,
|
| 346 |
-
labels={'LIEU_DIT': 'Site'},
|
| 347 |
-
title='Présence sur chaque site'
|
| 348 |
)
|
| 349 |
|
| 350 |
-
#
|
| 351 |
start_year = result['Start'].dt.year.min()
|
| 352 |
end_year = result['Finish'].dt.year.max()
|
| 353 |
for year in range(start_year, end_year + 1):
|
| 354 |
fig.add_vline(
|
| 355 |
-
x=pd.Timestamp(f'{year}-01-01'),
|
| 356 |
-
line=dict(color='grey', dash='dash', width=1)
|
| 357 |
)
|
| 358 |
|
| 359 |
-
|
| 360 |
-
fig.update_layout(legend=dict(traceorder="normal"), margin=dict(l=300))
|
| 361 |
|
| 362 |
# Mise à jour des étiquettes et de l'orientation de l'axe des ordonnées
|
| 363 |
-
fig.update_yaxes(categoryorder='total ascending') #
|
| 364 |
fig.update_layout(
|
| 365 |
-
xaxis_title='Date',
|
| 366 |
-
yaxis_title='Site',
|
| 367 |
-
xaxis=dict(tickformat='%Y-%m-%d'),
|
| 368 |
-
height=fig_height,
|
| 369 |
-
legend=dict(bgcolor='rgba(0, 0, 0, 0)')
|
| 370 |
)
|
| 371 |
|
| 372 |
return fig
|
| 373 |
|
| 374 |
gant_global = gant_diagram(df_antenna)
|
| 375 |
|
| 376 |
-
with open("pages/page3.md", "r", encoding="utf-8") as file:
|
| 377 |
page3 = file.read()
|
| 378 |
|
| 379 |
def update_gant(state):
    """Refresh the Gantt diagram according to the selected filters.

    Keeps only control records (ACTION == "C"), applies the optional
    department and date-range selections from *state*, then stores the
    regenerated figure in ``state.gant_global``.
    """
    records = df_antenna.copy()
    records = records[records['ACTION'] == "C"]

    # Optional department filter.
    if state.selected_dpt_gant:
        records = records[records['DEPARTEMENT'].isin(state.selected_dpt_gant)]

    # Optional date-range filter: normalise the widget values to
    # Timestamps so the comparison with the DATE column is consistent.
    if state.dates_gant and len(state.dates_gant) == 2:
        lower = pd.Timestamp(state.dates_gant[0])
        upper = pd.Timestamp(state.dates_gant[1])
        records = records[(records['DATE'] >= lower) & (records['DATE'] <= upper)]

    state.gant_global = gant_diagram(records)
|
| 392 |
|
| 393 |
## STATISTIQUES ANALYTIQUES
|
| 394 |
|
| 395 |
def detection_by_year(df_antenna):
    """Stacked bar chart of the number of detections per year and species.

    Args:
        df_antenna: DataFrame with columns DATE (datetime) and CODE_ESP.

    Returns:
        A plotly express bar figure.
    """
    # Work on a copy so the caller's DataFrame does not grow a YEAR
    # column as a side effect.
    df = df_antenna.copy()
    df['YEAR'] = df['DATE'].dt.year
    grouped_data = df.groupby(['YEAR', 'CODE_ESP']).size().reset_index(name='Detections')

    fig = px.bar(grouped_data, x='YEAR', y='Detections', color='CODE_ESP',
                 labels={'Detections': 'Nombre de détections', 'CODE_ESP': 'Espèce'},
                 )

    fig.update_layout(
        height=400,
        width=400,
        yaxis_title='Nombre de détections',
        xaxis_title=None,
        xaxis={'type': 'category', 'tickmode': 'linear'},  # show every year
        barmode='stack',
        legend=dict(
            bgcolor='rgba(0, 0, 0, 0)',
            orientation='h',  # horizontal legend
            x=0.5,  # centred horizontally
            y=-0.2,  # NOTE(review): original value lost in extraction — places legend below the axis; confirm
            xanchor='center',
            yanchor='top'
        ),
        margin=dict(b=100)
    )

    return fig
|
|
|
|
| 422 |
def capture_by_year(df_antenna):
    """Stacked bar chart of unique tagged individuals per year and species.

    Counts distinct NUM_PIT values among tagging records (ACTION == 'T').

    Args:
        df_antenna: DataFrame with columns ACTION, DATE, CODE_ESP, NUM_PIT.

    Returns:
        A plotly express bar figure.
    """
    # .copy() so the YEAR assignment does not hit a view of the input
    # (SettingWithCopyWarning) nor mutate the caller's DataFrame.
    df_filtre = df_antenna[df_antenna['ACTION'] == 'T'].copy()
    df_filtre['YEAR'] = df_filtre['DATE'].dt.year
    df_grouped = df_filtre.groupby(['YEAR', 'CODE_ESP'])['NUM_PIT'].nunique().reset_index()
    df_grouped.rename(columns={'NUM_PIT': 'Nombre d\'individus uniques'}, inplace=True)

    fig = px.bar(df_grouped, x='YEAR', y='Nombre d\'individus uniques', color='CODE_ESP',
                 labels={'Nombre d\'individus uniques': 'Nombre d\'individus uniques', 'CODE_ESP': 'Espèce'}
                 )

    fig.update_layout(
        height=400,
        width=400,
        yaxis_title='Nombre d\'individus uniques',
        xaxis_title=None,
        xaxis={'type': 'category', 'tickmode': 'linear'},  # show every year
        legend=dict(
            bgcolor='rgba(0, 0, 0, 0)',
            orientation='h',  # horizontal legend
            x=0.5,  # centred horizontally
            y=-0.2,  # NOTE(review): original value lost in extraction — places legend below the axis; confirm
            xanchor='center',
            yanchor='top'
        ),
        margin=dict(b=100)
    )

    return fig
|
|
|
|
| 450 |
def control_by_year(df_antenna):
    """Stacked bar chart of unique controlled individuals per year and species.

    Counts distinct NUM_PIT values among control records (ACTION == 'C').

    Args:
        df_antenna: DataFrame with columns ACTION, DATE, CODE_ESP, NUM_PIT.

    Returns:
        A plotly express bar figure.
    """
    # .copy() so the YEAR assignment does not hit a view of the input
    # (SettingWithCopyWarning) nor mutate the caller's DataFrame.
    df_filtre = df_antenna[df_antenna['ACTION'] == 'C'].copy()
    df_filtre['YEAR'] = df_filtre['DATE'].dt.year
    df_grouped = df_filtre.groupby(['YEAR', 'CODE_ESP'])['NUM_PIT'].nunique().reset_index()
    df_grouped.rename(columns={'NUM_PIT': 'Nombre d\'individus'}, inplace=True)

    fig = px.bar(df_grouped, x='YEAR', y='Nombre d\'individus', color='CODE_ESP',
                 labels={'Nombre d\'individus': 'Nombre d\'individus', 'CODE_ESP': 'Espèce'},
                 )

    fig.update_layout(
        height=400,
        width=400,
        yaxis_title='Nombre d\'individus',
        xaxis_title=None,
        xaxis={'type': 'category', 'tickmode': 'linear'},  # show every year
        legend=dict(
            bgcolor='rgba(0, 0, 0, 0)',
            orientation='h',  # horizontal legend
            x=0.5,  # centred horizontally
            y=-0.2,  # NOTE(review): original value lost in extraction — places legend below the axis; confirm
            xanchor='center',
            yanchor='top'
        ),
        margin=dict(b=100)
    )

    return fig
|
|
|
|
| 478 |
def detection_frequencies(df_antenna):
    """Line chart of detection frequency over the year (per month-day).

    Plots one global curve plus one curve per site; the x axis is a
    categorical month-day ordered chronologically (a 2021 non-leap
    calendar is used to build the category order).

    Args:
        df_antenna: DataFrame with columns DATE (datetime) and LIEU_DIT.

    Returns:
        A plotly ``go.Figure``.
    """
    # Work on a copy so the caller's DataFrame does not grow a
    # MONTH_DAY column as a side effect.
    df = df_antenna.copy()
    df['MONTH_DAY'] = df['DATE'].dt.strftime('%m-%d')
    global_freq = df.groupby('MONTH_DAY').size().reset_index(name='Global Detections')

    # Per-site frequencies.
    site_freq = df.groupby(['MONTH_DAY', 'LIEU_DIT']).size().reset_index(name='Detections')
    sites = site_freq['LIEU_DIT'].unique()

    # Chronological category order for the month-day axis.
    months_days = pd.date_range('2021-01-01', '2021-12-31').strftime('%m-%d')
    global_freq['MONTH_DAY'] = pd.Categorical(global_freq['MONTH_DAY'], categories=months_days, ordered=True)
    global_freq = global_freq.sort_values('MONTH_DAY')

    # Build the figure.
    fig = go.Figure()

    # Global curve.
    fig.add_trace(go.Scatter(x=global_freq['MONTH_DAY'], y=global_freq['Global Detections'],
                             mode='lines', name='Global'))

    # One curve per site; .copy() avoids SettingWithCopyWarning when
    # overwriting MONTH_DAY on the filtered slice.
    for site in sites:
        site_data = site_freq[site_freq['LIEU_DIT'] == site].copy()
        site_data['MONTH_DAY'] = pd.Categorical(site_data['MONTH_DAY'], categories=months_days, ordered=True)
        site_data = site_data.sort_values('MONTH_DAY')
        fig.add_trace(go.Scatter(x=site_data['MONTH_DAY'], y=site_data['Detections'],
                                 mode='lines', name=site))

    # Layout.
    fig.update_layout(
        xaxis_title='Jour de l\'année',
        yaxis_title='Nombre de détections',
        xaxis=dict(type='category', categoryorder='array', categoryarray=list(months_days)),
        legend=dict(bgcolor='rgba(0, 0, 0, 0)'),
        # yaxis=dict(range=[0, global_freq['Global Detections'].max() + 10])
    )
    return fig
|
|
|
|
| 515 |
def pie_controled(df_antenna):
    """Donut chart of the species distribution among control records.

    Counts CODE_ESP occurrences for rows where ACTION == 'C' and renders
    them as a donut chart with a transparent legend.
    """
    controls = df_antenna[df_antenna['ACTION'] == 'C']
    counts = controls['CODE_ESP'].value_counts().reset_index()
    counts.columns = ['Species', 'Count']

    # Donut chart (hole=0.5) with a pastel palette.
    fig = px.pie(
        counts,
        values='Count',
        names='Species',
        color_discrete_sequence=px.colors.qualitative.Pastel,
        hole=0.5,
    )

    # Transparent legend titled with the species label.
    fig.update_layout(
        legend_title_text='Espèce',
        showlegend=True,
        legend=dict(bgcolor='rgba(0, 0, 0, 0)'),
    )
    return fig
|
|
|
|
| 531 |
def pie_marked(df_antenna):
|
| 532 |
marked_species = df_antenna[df_antenna['ACTION'] == 'T']
|
| 533 |
species_counts = marked_species['CODE_ESP'].value_counts().reset_index()
|
|
@@ -535,16 +532,17 @@ def pie_marked(df_antenna):
|
|
| 535 |
|
| 536 |
# Créer un diagramme circulaire
|
| 537 |
fig = px.pie(species_counts,
|
| 538 |
-
values='Count', names='Species',
|
| 539 |
-
color_discrete_sequence=px.colors.qualitative.Pastel,
|
| 540 |
hole = 0.5)
|
| 541 |
|
| 542 |
# Personnalisation supplémentaire
|
| 543 |
-
fig.update_layout(legend_title_text='Espèce',
|
| 544 |
showlegend = True,
|
| 545 |
-
legend=dict(bgcolor='rgba(0, 0, 0, 0)')
|
| 546 |
)
|
| 547 |
return fig
|
|
|
|
| 548 |
def top_detection(df_antenna):
|
| 549 |
df_antenna['NUM_PIT'] = "n° " + df_antenna['NUM_PIT'].astype(str)
|
| 550 |
|
|
@@ -553,12 +551,12 @@ def top_detection(df_antenna):
|
|
| 553 |
|
| 554 |
# Créer un DataFrame à partir des dix premières catégories
|
| 555 |
df_top_categories = pd.DataFrame({'NUM_PIT': top_categories.index, 'Occurrences': top_categories.values})
|
| 556 |
-
df_top_categories = df_top_categories.sort_values(by='Occurrences', ascending=False)
|
| 557 |
|
| 558 |
-
# Créer le diagramme en barres horizontales
|
| 559 |
-
fig = px.bar(df_top_categories, x='Occurrences', y='NUM_PIT', orientation='h',
|
| 560 |
-
labels={'Occurrences': "Nombre d'occurrences", 'NUM_PIT': ''},
|
| 561 |
-
color='NUM_PIT', color_discrete_sequence=px.colors.qualitative.Set3)
|
| 562 |
|
| 563 |
# Ajuster la hauteur en fonction du nombre de catégories (avec une hauteur minimale de 400 pixels)
|
| 564 |
bar_height = 50 # Hauteur de chaque barre
|
|
@@ -567,31 +565,33 @@ def top_detection(df_antenna):
|
|
| 567 |
|
| 568 |
# Mettre à jour la mise en page avec des marges ajustées
|
| 569 |
fig.update_layout(
|
| 570 |
-
height=calculated_height,
|
| 571 |
-
showlegend=False,
|
| 572 |
-
margin=dict(l=200),
|
| 573 |
-
legend=dict(bgcolor='rgba(0, 0, 0, 0)')
|
| 574 |
)
|
| 575 |
|
| 576 |
return fig
|
| 577 |
|
| 578 |
-
|
| 579 |
-
|
| 580 |
-
|
| 581 |
-
|
| 582 |
-
|
| 583 |
-
|
| 584 |
-
|
| 585 |
-
|
| 586 |
-
|
| 587 |
-
|
| 588 |
-
|
| 589 |
-
|
| 590 |
-
|
| 591 |
-
|
| 592 |
-
|
| 593 |
-
|
| 594 |
-
|
|
|
|
|
|
|
| 595 |
page4 = file.read()
|
| 596 |
|
| 597 |
# DEMARRAGE DE L'APPLICATION
|
|
@@ -603,5 +603,5 @@ pages = {
|
|
| 603 |
"statistiques": page4
|
| 604 |
}
|
| 605 |
|
| 606 |
-
gui = Gui(pages=pages, css_file="assets/styles.css")
|
| 607 |
-
gui.run(host=
|
|
|
|
| 8 |
# CHARGEMENT DES DONNEES
|
| 9 |
|
| 10 |
def load_data_antenna():
|
| 11 |
+
df_antenna = pd.read_csv(r'data/df_clean.csv', engine = "pyarrow", dtype_backend = "pyarrow")
|
| 12 |
df_antenna['DATE'] = df_antenna['DATE'].astype('datetime64[ns]')
|
| 13 |
+
df_antenna['HEURE'] = pd.to_datetime(df_antenna['HEURE'], format = '%H:%M:%S').dt.time
|
| 14 |
df_antenna['ANNEE'] = df_antenna['ANNEE'].astype('int')
|
| 15 |
df_antenna['COMMUNE'] = df_antenna['COMMUNE'].astype('str')
|
| 16 |
df_antenna['LIEU_DIT'] = df_antenna['LIEU_DIT'].astype('str')
|
|
|
|
| 49 |
PREFET = "images/Prefet_NA.jpg"
|
| 50 |
VERT = "images/FranceNationVerte.jpg"
|
| 51 |
|
| 52 |
+
with open("pages/home.md", "r", encoding = "utf-8") as file:
|
| 53 |
root_md = file.read()
|
| 54 |
|
| 55 |
## PAGE DE PRESENTATION
|
| 56 |
+
with open("pages/page1.md", "r", encoding = "utf-8") as file:
|
| 57 |
page1 = file.read()
|
| 58 |
|
| 59 |
## VISUALISATION DES DONNEES D'ANTENNES
|
|
|
|
| 63 |
sites = sorted(df_antenna['LIEU_DIT'].unique().tolist())
|
| 64 |
dates = [df_antenna['DATE'].min(), df_antenna['DATE'].max()]
|
| 65 |
|
| 66 |
+
def create_trajectories_map(df, width=1200, height=1000):
    """Build a mapbox figure showing the trajectory between antenna sites.

    Draws a violet polyline linking the unique site coordinates in
    chronological order, then one labelled blue marker per site.

    Args:
        df: DataFrame with at least the columns LAT_WGS, LONG_WGS,
            DATE and LIEU_DIT.
        width: figure width in pixels.
        height: figure height in pixels.

    Returns:
        A plotly ``go.Figure``; an empty map centered on metropolitan
        France when *df* is empty.
    """
    if df.empty:
        # No data: return an empty map centered on France.
        fig = go.Figure(go.Scattermapbox())
        fig.update_layout(mapbox_style="carto-positron", mapbox_zoom=6,
                          mapbox_center={"lat": 46.493889, "lon": 2.602778},
                          height=height, width=width)
        return fig

    # Center the map on the mean of the point coordinates.
    center_lat, center_lon = df['LAT_WGS'].mean(), df['LONG_WGS'].mean()

    fig = go.Figure()

    # Unique site-to-site links, in chronological order.
    unique_connections = df.sort_values('DATE').drop_duplicates(subset=['LAT_WGS', 'LONG_WGS'], keep='first')

    # Draw one line segment between each pair of consecutive unique points.
    last_point = None
    for _, row in unique_connections.iterrows():
        if last_point is not None:
            fig.add_trace(go.Scattermapbox(
                lat=[last_point['LAT_WGS'], row['LAT_WGS']],
                lon=[last_point['LONG_WGS'], row['LONG_WGS']],
                mode='lines',
                line=dict(color='violet', width=2),
                showlegend=False
            ))
        last_point = row

    # Add one labelled marker per distinct site.
    sites = df.drop_duplicates(subset=['LAT_WGS', 'LONG_WGS', 'LIEU_DIT'])
    for _, site in sites.iterrows():
        fig.add_trace(go.Scattermapbox(
            lat=[site['LAT_WGS']],
            lon=[site['LONG_WGS']],
            mode='markers+text',
            marker=go.scattermapbox.Marker(size=9, color='blue'),
            textfont=dict(color='black'),  # label colour
            text=site['LIEU_DIT'],
            textposition="bottom right",
            name=site['LIEU_DIT'],  # site label used as trace name
            showlegend=False
        ))

    fig.update_layout(
        mapbox_style="carto-positron",  # black-and-white base map
        mapbox_zoom=6,
        mapbox_center={"lat": center_lat, "lon": center_lon},
        height=height,
        width=width
    )
    return fig
|
| 115 |
|
| 116 |
+
def create_transition_matrix(df, remove_self_loops=True, reduce_self_loops=False, reduction_factor=0.5):
    """Count site-to-site transitions of each individual (NUM_PIT).

    Args:
        df: DataFrame with columns NUM_PIT, DATE and LIEU_DIT.
        remove_self_loops: drop transitions whose source equals target.
        reduce_self_loops: instead of dropping, down-weight self
            transitions (re-detections on the same site).
        reduction_factor: multiplier applied to self-transition counts
            when reduce_self_loops is True.

    Returns:
        (transition_matrix, lieux): a square DataFrame indexed/columned
        by site name holding transition counts, and the sorted list of
        site names.
    """
    # Sort by individual then date so shift() yields the previous site
    # of the same individual.  sort_values returns a copy, so the
    # caller's DataFrame is not mutated by the column assignment below.
    df = df.sort_values(by=['NUM_PIT', 'DATE'])

    # Previous site visited by the same individual.
    df['LIEU_PRECEDENT'] = df.groupby('NUM_PIT')['LIEU_DIT'].shift()

    # Keep only valid transitions (the first detection of each
    # individual has no previous site).
    df_transitions = df.dropna(subset=['LIEU_PRECEDENT'])

    # Count transitions from each site to each other site.
    transition_counts = df_transitions.groupby(['LIEU_PRECEDENT', 'LIEU_DIT']).size().reset_index(name='count')

    # Optionally drop source == target transitions.
    if remove_self_loops:
        transition_counts = transition_counts[transition_counts['LIEU_PRECEDENT'] != transition_counts['LIEU_DIT']]

    # Optionally down-weight re-detection (self) transitions instead.
    if reduce_self_loops:
        transition_counts.loc[transition_counts['LIEU_PRECEDENT'] == transition_counts['LIEU_DIT'], 'count'] *= reduction_factor

    # Build the square transition matrix over every known site.
    lieux = sorted(set(df['LIEU_DIT'].unique()) | set(df['LIEU_PRECEDENT'].dropna().unique()))
    transition_matrix = pd.DataFrame(0, index=lieux, columns=lieux)

    for _, row in transition_counts.iterrows():
        transition_matrix.at[row['LIEU_PRECEDENT'], row['LIEU_DIT']] = row['count']

    return transition_matrix, lieux
|
| 145 |
+
|
| 146 |
+
def process_transition_matrix(transition_matrix, df, threshold=9):
|
| 147 |
# Transformer la matrice en table de connexions
|
| 148 |
transition_table = transition_matrix.stack().reset_index()
|
| 149 |
transition_table.columns = ['source', 'target', 'count']
|
|
|
|
| 161 |
# Séparer les paires de sites dans des colonnes distinctes
|
| 162 |
transition_table[['source', 'target']] = pd.DataFrame(transition_table['site_pair'].tolist(), index=transition_table.index)
|
| 163 |
|
| 164 |
+
# Filtrer pour ne garder que les connexions avec au moins un certain nombre d'occurrences
|
| 165 |
+
transition_table = transition_table[transition_table['count'] > threshold] # Valeur du seuil de significativité à sélectionner
|
| 166 |
|
| 167 |
# Obtenir les limites pour normaliser les valeurs de count
|
|
|
|
|
|
|
| 168 |
mean_count = transition_table['count'].mean()
|
| 169 |
std_count = transition_table['count'].std()
|
| 170 |
|
| 171 |
# Normaliser les valeurs de count entre 0 et 1
|
| 172 |
+
transition_table['normalized_count'] = (transition_table['count'] - mean_count) / std_count # Centrer-normer
|
|
|
|
| 173 |
|
| 174 |
+
# Création d'une colormap personnalisée
|
| 175 |
cmap = mcolors.LinearSegmentedColormap.from_list(
|
| 176 |
"CustomGreenYellowOrangeRed",
|
| 177 |
["#808080", "#4B4B4B", "#FC4C02", "#FF8C00", "#FF0000"]
|
| 178 |
)
|
| 179 |
|
| 180 |
+
# Calculer les couleurs basées sur les counts standardisés
|
| 181 |
transition_table['color'] = transition_table['normalized_count'].apply(lambda x: mcolors.to_hex(cmap(x)))
|
| 182 |
|
| 183 |
# Fusionner les coordonnées du df dans transition_table pour source et target
|
|
|
|
| 186 |
|
| 187 |
transition_table = transition_table.merge(coords, left_on='source', right_index=True)
|
| 188 |
transition_table = transition_table.merge(coords, left_on='target', right_index=True, suffixes=('_source', '_target'))
|
| 189 |
+
|
| 190 |
return transition_table
|
| 191 |
+
|
| 192 |
+
def create_trajectories_map_from_matrix(df, transition_table, width = 1200, height = 1000):
    """Draw the inter-site connection map from a pre-computed transition table.

    df               -- detection records providing site names ('LIEU_DIT') and
                        WGS84 coordinates ('LAT_WGS', 'LONG_WGS')
    transition_table -- one row per site pair, with source/target coordinates,
                        a 'count' column and a pre-computed 'color'
    width, height    -- figure size in pixels
    Returns a plotly Figure centred on the mean position of the data.
    """
    figure = go.Figure()

    # One line trace per connection, coloured by its (standardised) count.
    for _, link in transition_table.iterrows():
        figure.add_trace(go.Scattermapbox(
            lat = [link['LAT_WGS_source'], link['LAT_WGS_target']],
            lon = [link['LONG_WGS_source'], link['LONG_WGS_target']],
            mode = 'lines',
            line = dict(width = 1, color = link['color']),
            hovertext = f"Count: {link['count']}",
            hoverinfo = 'text',
            showlegend = False
        ))

    # One labelled marker per distinct site.
    site_coords = df[['LIEU_DIT', 'LAT_WGS', 'LONG_WGS']].drop_duplicates()
    for _, site in site_coords.iterrows():
        figure.add_trace(go.Scattermapbox(
            lat = [site['LAT_WGS']],
            lon = [site['LONG_WGS']],
            mode = 'markers+text',
            marker = go.scattermapbox.Marker(
                size = 6,
                color = '#8467D7',  # light purple
            ),
            textfont = dict(color = 'grey'),
            text = [site['LIEU_DIT']],
            textposition = "bottom right",
            showlegend = False
        ))

    # Centre the map on the barycentre of all detections.
    mean_lat = df['LAT_WGS'].mean()
    mean_lon = df['LONG_WGS'].mean()

    figure.update_layout(
        mapbox_style = "carto-positron",  # light grey base map
        mapbox_zoom = 6,
        mapbox_center = {"lat": mean_lat, "lon": mean_lon},
        font = dict(color = 'black'),
        height = height,
        width = width
    )

    return figure
|
|
|
|
| 241 |
|
| 242 |
# Initial (empty) trajectories map shown before the user applies any filter.
map_section = create_trajectories_map(df_empty)

# Markdown content for the antenna-data visualisation page.
with open("pages/page2.md", "r", encoding = "utf-8") as file:
    page2 = file.read()
|
| 246 |
|
| 247 |
def refresh_button(state):
    """Taipy callback: apply every selected filter to the antenna data and refresh the map."""
    data = df_antenna.copy()

    # Department of origin: keep individuals first tagged ('T') in a selected department.
    if state.selected_dpt:
        tagged_pits = data.loc[
            (data['ACTION'] == 'T') & (data['DEPARTEMENT'].isin(state.selected_dpt)),
            'NUM_PIT'
        ].unique()
        data = data[data['NUM_PIT'].isin(tagged_pits)]

    # Species filter.
    if state.selected_sp:
        data = data[data['CODE_ESP'].isin(state.selected_sp)]

    # Sex filter.
    if state.selected_gender:
        data = data[data['SEXE'].isin(state.selected_gender)]

    # Site of origin: keep individuals first tagged ('T') on a selected site.
    if state.selected_sites:
        origin_pits = data.loc[
            (data['ACTION'] == 'T') & (data['LIEU_DIT'].isin(state.selected_sites)),
            'NUM_PIT'
        ].unique()
        data = data[data['NUM_PIT'].isin(origin_pits)]

    # Date interval filter (inclusive on both ends).
    if state.selected_dates and len(state.selected_dates) == 2:
        first = pd.Timestamp(state.selected_dates[0])
        last = pd.Timestamp(state.selected_dates[1])
        data = data[(data['DATE'] >= first) & (data['DATE'] <= last)]

    # Rebuild the transition table and redraw the trajectories map.
    matrix_filtered, labels_filtered = create_transition_matrix(data)
    table_filtered = process_transition_matrix(matrix_filtered, data)
    state.map_section = create_trajectories_map_from_matrix(data, table_filtered)
|
|
|
|
| 281 |
## PHENOLOGIES DES SITES
|
| 282 |
|
| 283 |
def gant_diagram(df_original):
|
| 284 |
+
# Créer une copie du DataFrame original par sécurité
|
| 285 |
df_filtered = df_original[df_original['ACTION'] == "C"].copy()
|
| 286 |
|
| 287 |
# Convertir la colonne DATE en datetime et localiser en UTC
|
|
|
|
| 289 |
df_filtered['DATE'] = df_filtered['DATE'].dt.tz_localize('UTC')
|
| 290 |
|
| 291 |
# Trier les valeurs par DATE
|
| 292 |
+
df_filtered = df_filtered.sort_values(by = 'DATE', ascending = True)
|
| 293 |
|
| 294 |
# Ajouter des colonnes pour les visites précédentes et les nouvelles visites
|
| 295 |
df_filtered['Prev_DATE'] = df_filtered.groupby(['LIEU_DIT'])['DATE'].shift(1)
|
| 296 |
df_filtered['New_Visit'] = (df_filtered['DATE'] - df_filtered['Prev_DATE']).dt.days > 1
|
| 297 |
+
df_filtered['New_Visit'].fillna(True, inplace = True)
|
| 298 |
df_filtered['Visit_ID'] = df_filtered.groupby(['LIEU_DIT'])['New_Visit'].cumsum()
|
| 299 |
|
| 300 |
# Supprimer les LIEU_DIT qui n'ont aucune visite détectée
|
| 301 |
+
df_filtered = df_filtered.dropna(subset = ['Visit_ID'])
|
| 302 |
|
| 303 |
# Calculer min/max dates pour chaque visite
|
| 304 |
result = df_filtered.groupby(['LIEU_DIT', 'Visit_ID']).agg(
|
| 305 |
+
Start = pd.NamedAgg(column = 'DATE', aggfunc = 'min'),
|
| 306 |
+
Finish = pd.NamedAgg(column = 'DATE', aggfunc = 'max'),
|
| 307 |
+
Departement = pd.NamedAgg(column = 'DEPARTEMENT', aggfunc = 'first')
|
| 308 |
).reset_index()
|
| 309 |
|
| 310 |
# Supprimer les LIEU_DIT qui n'ont pas de dates valides
|
| 311 |
+
result = result.dropna(subset = ['Start', 'Finish'])
|
| 312 |
|
| 313 |
# Filtrer les LIEU_DIT qui ont des résultats
|
| 314 |
result = result[result['Finish'] > result['Start']]
|
| 315 |
|
| 316 |
# Trier les sites par ordre alphabétique et les départements pour la légende
|
| 317 |
+
result = result.sort_values(by = ['Departement', 'LIEU_DIT'])
|
|
|
|
|
|
|
| 318 |
result['Start'] = pd.to_datetime(result['Start'])
|
| 319 |
result['Finish'] = pd.to_datetime(result['Finish'])
|
| 320 |
|
| 321 |
# Déterminer la hauteur de la figure en fonction du nombre de sites
|
| 322 |
num_sites = result['LIEU_DIT'].nunique()
|
| 323 |
+
height_per_site = 50 # Hauteur allouée par site en pixels
|
| 324 |
+
base_height = 100 # Hauteur de base pour la figure
|
| 325 |
+
max_height = 20000 # Hauteur maximale de la figure
|
| 326 |
fig_height = min(base_height + num_sites * height_per_site, max_height)
|
| 327 |
|
| 328 |
# Créer le diagramme de Gantt
|
| 329 |
fig = px.timeline(
|
| 330 |
result,
|
| 331 |
+
x_start = 'Start',
|
| 332 |
+
x_end = 'Finish',
|
| 333 |
+
y = 'LIEU_DIT',
|
| 334 |
+
color = 'Departement', # Changer la couleur en fonction du département
|
| 335 |
+
color_discrete_sequence = px.colors.qualitative.Plotly,
|
| 336 |
+
labels = {'LIEU_DIT': 'Site'},
|
| 337 |
+
title = 'Présence sur chaque site'
|
| 338 |
)
|
| 339 |
|
| 340 |
+
# Ajout de lignes verticales pour chaque 1er janvier
|
| 341 |
start_year = result['Start'].dt.year.min()
|
| 342 |
end_year = result['Finish'].dt.year.max()
|
| 343 |
for year in range(start_year, end_year + 1):
|
| 344 |
fig.add_vline(
|
| 345 |
+
x = pd.Timestamp(f'{year}-01-01'),
|
| 346 |
+
line = dict(color = 'grey', dash = 'dash', width = 1)
|
| 347 |
)
|
| 348 |
|
| 349 |
+
fig.update_layout(legend = dict(traceorder = "normal"), margin = dict(l = 300))
|
|
|
|
| 350 |
|
| 351 |
# Mise à jour des étiquettes et de l'orientation de l'axe des ordonnées
|
| 352 |
+
fig.update_yaxes(categoryorder = 'total ascending') # Tri les sites par date de début (si nécessaire)
|
| 353 |
fig.update_layout(
|
| 354 |
+
xaxis_title = 'Date',
|
| 355 |
+
yaxis_title = 'Site',
|
| 356 |
+
xaxis = dict(tickformat = '%Y-%m-%d'), # Format des dates sur l'axe X pour plus de clarté
|
| 357 |
+
height = fig_height,
|
| 358 |
+
legend = dict(bgcolor = 'rgba(0, 0, 0, 0)')
|
| 359 |
)
|
| 360 |
|
| 361 |
return fig
|
| 362 |
|
| 363 |
# Gantt chart for the full dataset (initial state before any filtering).
gant_global = gant_diagram(df_antenna)

# Markdown content for the site-phenology page.
with open("pages/page3.md", "r", encoding = "utf-8") as file:
    page3 = file.read()
|
| 367 |
|
| 368 |
def update_gant(state):
    """Taipy callback: refresh the Gantt chart according to the selected departments and dates."""
    # Start from the control ('C') records only.
    subset = df_antenna[df_antenna['ACTION'] == "C"].copy()

    # Restrict to the selected departments, if any.
    if state.selected_dpt_gant:
        subset = subset[subset['DEPARTEMENT'].isin(state.selected_dpt_gant)]

    # Restrict to the selected date interval, if a complete one was picked.
    if state.dates_gant and len(state.dates_gant) == 2:
        # Convert the selected dates into Timestamp objects for comparison.
        begin = pd.Timestamp(state.dates_gant[0])
        end = pd.Timestamp(state.dates_gant[1])
        subset = subset[(subset['DATE'] >= begin) & (subset['DATE'] <= end)]

    state.gant_global = gant_diagram(subset)
|
| 384 |
|
| 385 |
## STATISTIQUES ANALYTIQUES
|
| 386 |
|
| 387 |
def detection_by_year(df_antenna):
    """Stacked bar chart of the number of detections per year and per species.

    df_antenna -- detection records with a datetime 'DATE' column and a
                  'CODE_ESP' (species code) column.
    Returns a plotly Figure.
    """
    # Derive the year with .assign so the caller's DataFrame is NOT mutated
    # (the original wrote a 'YEAR' column into the module-level df_antenna).
    grouped_data = (
        df_antenna.assign(YEAR = df_antenna['DATE'].dt.year)
                  .groupby(['YEAR', 'CODE_ESP'])
                  .size()
                  .reset_index(name = 'Detections')
    )

    fig = px.bar(grouped_data, x = 'YEAR', y = 'Detections', color = 'CODE_ESP',
                 labels = {'Detections': 'Nombre de détections', 'CODE_ESP': 'Espèce'},
                 )

    fig.update_layout(
        height = 400,
        width = 400,
        yaxis_title = 'Nombre de détections',
        xaxis_title = None,
        xaxis = {'type': 'category', 'tickmode': 'linear'},  # show every year
        barmode = 'stack',
        legend = dict(
            bgcolor = 'rgba(0, 0, 0, 0)',
            orientation = 'h',   # horizontal legend
            x = 0.5,             # centred horizontally
            y = -0.2,            # placed below the figure
            xanchor = 'center',
            yanchor = 'top'
        ),
        margin = dict(b = 100)   # bottom room for the legend
    )

    return fig
|
| 414 |
+
|
| 415 |
def capture_by_year(df_antenna):
    """Stacked bar chart of distinct tagged individuals per year and per species.

    Only tagging ('T') records are counted; individuals are de-duplicated
    by their PIT number ('NUM_PIT'). Returns a plotly Figure.
    """
    # .copy() avoids pandas' chained-assignment warning (and any silent
    # write-back into df_antenna) when the 'YEAR' column is added below.
    df_filtre = df_antenna[df_antenna['ACTION'] == 'T'].copy()
    df_filtre['YEAR'] = df_filtre['DATE'].dt.year
    df_grouped = df_filtre.groupby(['YEAR', 'CODE_ESP'])['NUM_PIT'].nunique().reset_index()
    df_grouped.rename(columns = {'NUM_PIT': 'Nombre d\'individus uniques'}, inplace = True)

    fig = px.bar(df_grouped, x = 'YEAR', y = 'Nombre d\'individus uniques', color = 'CODE_ESP',
                 labels = {'Nombre d\'individus uniques': 'Nombre d\'individus uniques', 'CODE_ESP': 'Espèce'}
                 )

    fig.update_layout(
        height = 400,
        width = 400,
        yaxis_title = 'Nombre d\'individus uniques',
        xaxis_title = None,
        xaxis = {'type': 'category', 'tickmode': 'linear'},  # show every year
        legend = dict(
            bgcolor ='rgba(0, 0, 0, 0)',
            orientation = 'h',   # horizontal legend
            x = 0.5,             # centred horizontally
            y = -0.2,            # placed below the figure
            xanchor = 'center',
            yanchor = 'top'
        ),
        margin = dict(b = 100)   # bottom room for the legend
    )

    return fig
|
| 443 |
+
|
| 444 |
def control_by_year(df_antenna):
    """Stacked bar chart of distinct controlled individuals per year and per species.

    Only control ('C') records are counted; individuals are de-duplicated
    by their PIT number ('NUM_PIT'). Returns a plotly Figure.
    """
    # .copy() avoids pandas' chained-assignment warning (and any silent
    # write-back into df_antenna) when the 'YEAR' column is added below.
    df_filtre = df_antenna[df_antenna['ACTION'] == 'C'].copy()
    df_filtre['YEAR'] = df_filtre['DATE'].dt.year
    df_grouped = df_filtre.groupby(['YEAR', 'CODE_ESP'])['NUM_PIT'].nunique().reset_index()
    df_grouped.rename(columns = {'NUM_PIT': 'Nombre d\'individus'}, inplace = True)

    fig = px.bar(df_grouped, x = 'YEAR', y = 'Nombre d\'individus', color = 'CODE_ESP',
                 labels = {'Nombre d\'individus': 'Nombre d\'individus', 'CODE_ESP': 'Espèce'},
                 )

    fig.update_layout(
        height = 400,
        width = 400,
        yaxis_title = 'Nombre d\'individus',
        xaxis_title = None,
        xaxis = {'type': 'category', 'tickmode': 'linear'},  # show every year
        legend = dict(
            bgcolor = 'rgba(0, 0, 0, 0)',
            orientation = 'h',   # horizontal legend
            x = 0.5,             # centred horizontally
            y = -0.2,            # placed below the figure
            xanchor = 'center',
            yanchor = 'top'
        ),
        margin = dict(b = 100)   # bottom room for the legend
    )

    return fig
|
| 472 |
+
|
| 473 |
def detection_frequencies(df_antenna):
    """Daily detection-frequency curves, global and per site, folded onto one calendar year.

    df_antenna -- detection records with a datetime 'DATE' and a 'LIEU_DIT' column.
    Returns a plotly Figure with one 'Global' curve plus one curve per site.
    """
    # Derive the month-day key with .assign so the caller's DataFrame is NOT
    # mutated (the original wrote a 'MONTH_DAY' column into df_antenna).
    df = df_antenna.assign(MONTH_DAY = df_antenna['DATE'].dt.strftime('%m-%d'))
    global_freq = df.groupby('MONTH_DAY').size().reset_index(name = 'Global Detections')

    # Per-site daily frequencies.
    site_freq = df.groupby(['MONTH_DAY', 'LIEU_DIT']).size().reset_index(name = 'Detections')
    sites = site_freq['LIEU_DIT'].unique()

    # Chronological ordering of the month-day keys (2021 is an arbitrary non-leap year).
    months_days = pd.date_range('2021-01-01', '2021-12-31').strftime('%m-%d')
    global_freq['MONTH_DAY'] = pd.Categorical(global_freq['MONTH_DAY'], categories = months_days, ordered = True)
    global_freq = global_freq.sort_values('MONTH_DAY')

    fig = go.Figure()

    # Global curve.
    fig.add_trace(go.Scatter(x = global_freq['MONTH_DAY'], y = global_freq['Global Detections'],
                             mode = 'lines', name = 'Global'))

    # One curve per site; .copy() avoids the chained-assignment warning
    # the original triggered when writing into a filtered slice.
    for site in sites:
        site_data = site_freq[site_freq['LIEU_DIT'] == site].copy()
        site_data['MONTH_DAY'] = pd.Categorical(site_data['MONTH_DAY'], categories = months_days, ordered = True)
        site_data = site_data.sort_values('MONTH_DAY')
        fig.add_trace(go.Scatter(x = site_data['MONTH_DAY'], y = site_data['Detections'],
                                 mode = 'lines', name = site))

    fig.update_layout(
        xaxis_title = 'Jour de l\'année',
        yaxis_title = 'Nombre de détections',
        xaxis = dict(type = 'category', categoryorder = 'array', categoryarray = [md for md in months_days]),
        legend = dict(bgcolor = 'rgba(0, 0, 0, 0)'),
        #yaxis = dict(range = [0, global_freq['Global Detections'].max() + 10])
    )
    return fig
|
| 510 |
+
|
| 511 |
def pie_controled(df_antenna):
    """Donut chart of controlled ('C') detections broken down by species."""
    # Count the control records per species.
    controls = df_antenna[df_antenna['ACTION'] == 'C']
    counts = controls['CODE_ESP'].value_counts().reset_index()
    counts.columns = ['Species', 'Count']

    # Donut (half-hole pie) chart with a pastel palette.
    fig = px.pie(
        counts,
        values = 'Count',
        names = 'Species',
        color_discrete_sequence = px.colors.qualitative.Pastel,
        hole = 0.5,
    )

    # Legend styling: transparent background, titled by species.
    fig.update_layout(
        legend_title_text = 'Espèce',
        showlegend = True,
        legend = dict(bgcolor = 'rgba(0, 0, 0, 0)'),
    )
    return fig
|
| 527 |
+
|
| 528 |
def pie_marked(df_antenna):
|
| 529 |
marked_species = df_antenna[df_antenna['ACTION'] == 'T']
|
| 530 |
species_counts = marked_species['CODE_ESP'].value_counts().reset_index()
|
|
|
|
| 532 |
|
| 533 |
# Créer un diagramme circulaire
|
| 534 |
fig = px.pie(species_counts,
|
| 535 |
+
values = 'Count', names = 'Species',
|
| 536 |
+
color_discrete_sequence = px.colors.qualitative.Pastel,
|
| 537 |
hole = 0.5)
|
| 538 |
|
| 539 |
# Personnalisation supplémentaire
|
| 540 |
+
fig.update_layout(legend_title_text = 'Espèce',
|
| 541 |
showlegend = True,
|
| 542 |
+
legend = dict(bgcolor = 'rgba(0, 0, 0, 0)')
|
| 543 |
)
|
| 544 |
return fig
|
| 545 |
+
|
| 546 |
def top_detection(df_antenna):
|
| 547 |
df_antenna['NUM_PIT'] = "n° " + df_antenna['NUM_PIT'].astype(str)
|
| 548 |
|
|
|
|
| 551 |
|
| 552 |
# Créer un DataFrame à partir des dix premières catégories
|
| 553 |
df_top_categories = pd.DataFrame({'NUM_PIT': top_categories.index, 'Occurrences': top_categories.values})
|
| 554 |
+
df_top_categories = df_top_categories.sort_values(by = 'Occurrences', ascending = False)
|
| 555 |
|
| 556 |
+
# Créer le diagramme en barres horizontales
|
| 557 |
+
fig = px.bar(df_top_categories, x = 'Occurrences', y = 'NUM_PIT', orientation = 'h',
|
| 558 |
+
labels = {'Occurrences': "Nombre d'occurrences", 'NUM_PIT': ''},
|
| 559 |
+
color = 'NUM_PIT', color_discrete_sequence = px.colors.qualitative.Set3)
|
| 560 |
|
| 561 |
# Ajuster la hauteur en fonction du nombre de catégories (avec une hauteur minimale de 400 pixels)
|
| 562 |
bar_height = 50 # Hauteur de chaque barre
|
|
|
|
| 565 |
|
| 566 |
# Mettre à jour la mise en page avec des marges ajustées
|
| 567 |
fig.update_layout(
|
| 568 |
+
height = calculated_height, # Hauteur dynamique
|
| 569 |
+
showlegend = False,
|
| 570 |
+
margin = dict(l = 200), # Augmenter la marge gauche pour mieux lire les annotations
|
| 571 |
+
legend = dict(bgcolor = 'rgba(0, 0, 0, 0)')
|
| 572 |
)
|
| 573 |
|
| 574 |
return fig
|
| 575 |
|
| 576 |
# Initialisation of every plot shown on the statistics page
plot_detection_year = detection_by_year(df_antenna)   # bar plot: detections per year and per species
plot_capture_year = capture_by_year(df_antenna)       # bar plot: captures per year and per species
plot_control_year = control_by_year(df_antenna)       # bar plot: controls per year and per species
plot_frequencies = detection_frequencies(df_antenna)  # detection-frequency curves per day of the year and per site
plot_pie_controled = pie_controled(df_antenna)        # pie chart of controlled individuals
plot_pie_marked = pie_marked(df_antenna)              # pie chart of marked individuals
plot_top_detection = top_detection(df_antenna)        # horizontal bar plot of the 10 most detected individuals
table_plot_raw = process_transition_matrix(transition_matrix, df_antenna, threshold = 0)
transition_table_plot = table_plot_raw[['source', 'target', 'count']].sort_values(by = 'count', ascending = False)  # trajectories table

# Scalar indicators displayed on the page
total_captured = len(df_antenna[(df_antenna['ACTION'] == 'T') | (df_antenna['ACTION'] == 'M')])  # number of capture rows ('T' or 'M')
total_recaptured = df_antenna[df_antenna['ACTION'] == 'C']['NUM_PIT'].nunique()  # distinct controlled individuals
total_marked = df_antenna[df_antenna['ACTION'] == 'T']['NUM_PIT'].nunique()      # distinct marked individuals
sites_capture = df_antenna[df_antenna['ACTION'] == 'T']['LIEU_DIT'].nunique()    # sites with at least one capture
sites_antennes = df_antenna['LIEU_DIT'].nunique()                                # sites with at least one detection

# Markdown content for the statistics page.
with open("pages/page4.md", "r", encoding = "utf-8") as file:
    page4 = file.read()
|
| 596 |
|
| 597 |
# DEMARRAGE DE L'APPLICATION
|
|
|
|
| 603 |
"statistiques": page4
|
| 604 |
}
|
| 605 |
|
| 606 |
# Build the multi-page Taipy GUI and start the web server.
gui = Gui(pages = pages, css_file = "assets/styles.css")
gui.run(host = '0.0.0.0', port = 5000, use_session = True)  # use_session = True allows several simultaneous users
|