#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
# Author: Lirsen Myrtaj
# MUST BE RUN FROM the "Guidehouse/FSI Indicators" directory

import streamlit as st
import streamlit.components.v1 as components
import os
import matplotlib.pyplot as plt
import csv
import json

import folium
import pandas as pd

# Pull the country and indicator names into lists used by the GUI to present data.
countries = []
indicators = []
file_name = os.getcwd() + "/FSI_data.csv"
with open(file_name, "r") as file:
    rows = csv.reader(file)
    for row in list(rows)[1:]:  # skip the header row
        if row[4] not in countries:
            countries.append(row[4])
        if row[1] not in indicators:
            indicators.append(row[1])


class Dashboard_data:
    # A class, since there are several related functions that are easier to keep
    # split up. Column indices assume the FSI_data.csv layout: row[1] = indicator,
    # row[3] = country id, row[4] = country, row[5] = region, row[6] = income group,
    # row[7] = year, row[8] = rank/score.

    def __init__(self, country):
        self.country = country

    def metadata(self):
        # Gives metadata for the nation, i.e. region, income group, etc.
        metadata = {}
        with open(file_name, "r") as file:
            rows = csv.reader(file)
            for row in list(rows)[1:]:  # skip the header row
                if self.country == row[4] and row[7] == "2020":
                    metadata = {"name": self.country,
                                "region": row[5],
                                "income group": row[6],
                                "id": row[3],
                                "overall score": row[8]}
                    break  # stop at the first matching 2020 row; later rows would overwrite it
        return metadata

    def FSI_indicators(self):
        # Returns the indicator data as a DataFrame (for display) and as a dictionary
        # keyed "<indicator> (<year>)" that changes_per_year can parse.
        indicators_ = []
        years_ = []
        values_ = []
        data = {}
        with open(file_name, "r") as file:
            rows = csv.reader(file)
            for row in list(rows)[1:]:  # skip the header row
                if self.country == row[4]:
                    indicator = row[1].split(": ")[1]  # removes the "Fragile State Indicator" prefix
                    year = row[7]
                    value = row[8]
                    indicators_.append(indicator)
                    years_.append(year)
                    values_.append(value)
                    # Full indicator name plus year: the key used for the
                    # time-series analysis in changes_per_year.
                    key = "{} ({})".format(row[1], row[7])
                    data[key] = row[8]
        df = pd.DataFrame({
            "Year": years_,
            "Indicator": indicators_,
            "Rank/Score": values_
        })
        return df, data

    def changes_per_year(self, data, indicator):
        # Creates a time-series plot for one indicator and saves it to plot.jpg.
        # Keys of data are "<indicator> (<year>)", so the year sits in the trailing parentheses.
        years = []
        for key in data.keys():
            year = key.rsplit("(", 1)[1].rstrip(")")
            if year not in years:
                years.append(year)
        x_axis = sorted(years)
        y_axis = []
        for year in x_axis:  # iterate in the same (sorted) order as the x axis
            y_axis.append(float(data["{} ({})".format(indicator, year)]))
        plt.figure()
        plt.title("{} - {}".format(self.country, indicator.split(": ")[1]))
        plt.ylabel("FSI Score")
        plt.xlabel("Year")
        plt.plot(x_axis, y_axis)
        plt.savefig("plot.jpg")

    def income_score_change(self):
        # income_groups records the distinct income groups seen for this country;
        # more than one entry means the classification changed at some point.
        income_groups = []
        change_year = None
        with open(file_name, "r") as file:
            rows = csv.reader(file)
            country_rows = [row for row in list(rows)[1:] if self.country == row[4]]  # skip header, keep this country
        for row in sorted(country_rows, key=lambda r: r[7]):  # walk the rows in year order
            if row[6] not in income_groups:
                income_groups.append(row[6])
                change_year = row[7]
        if len(income_groups) > 1:
            return "{} became a {} in {}".format(self.country, income_groups[-1], change_year)
        return "No change in income group for {} across all years".format(self.country)

    def regional_group_change(self):
        # regional_groups records the distinct regional groups seen for this country;
        # more than one entry means the classification changed at some point.
        regional_groups = []
        change_year = None
        with open(file_name, "r") as file:
            rows = csv.reader(file)
            country_rows = [row for row in list(rows)[1:] if self.country == row[4]]  # skip header, keep this country
        for row in sorted(country_rows, key=lambda r: r[7]):  # walk the rows in year order
            if row[5] not in regional_groups:
                regional_groups.append(row[5])
                change_year = row[7]
        if len(regional_groups) > 1:
            return "{} became a {} country in {}".format(self.country, regional_groups[-1], change_year)
        return "No change in regional group for {} across all years".format(self.country)
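# Rough usage sketch for Dashboard_data outside of Streamlit (illustrative only;
# "Afghanistan" is an assumed country name and must match a value in FSI_data.csv):
#
#   afghanistan = Dashboard_data("Afghanistan")
#   print(afghanistan.metadata())
#   df, data = afghanistan.FSI_indicators()
#   afghanistan.changes_per_year(data, indicators[0])  # writes plot.jpg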
class Country_map:

    def __init__(self, country):
        self.country = country

    # Returns the lat/long used to center the map on the country.
    def get_country_center(self):
        with open(os.getcwd() + "/country_centers.csv", "r") as file:
            rows = csv.reader(file)
            for row in rows:
                if row[3] == self.country:
                    return {"lat": float(row[1]), "long": float(row[2])}

    # Gets the bounding box of the country, used to work out the zoom of the map.
    def get_bounds(self):
        with open(os.getcwd() + "/world-countries.json", "r") as countries_json_file:
            countries_json = json.load(countries_json_file)["features"]
        countries_boundaries = []
        for dataset in countries_json:
            if dataset["properties"].get("name") == self.country:
                if len(dataset["geometry"]["coordinates"]) == 1:
                    countries_boundaries = dataset["geometry"]["coordinates"][0]
                elif len(dataset["geometry"]["coordinates"]) > 1:
                    for boundary in dataset["geometry"]["coordinates"]:
                        for b in boundary[0]:
                            countries_boundaries.append(b)
        df = pd.DataFrame(countries_boundaries, columns=["Long", "Lat"])
        sw = df[["Long", "Lat"]].min().values.tolist()
        ne = df[["Long", "Lat"]].max().values.tolist()
        return sw, ne

    # Returns the capital city and its coordinates.
    def get_capital(self):
        capitals_file = os.getcwd() + "/capitals.csv"
        with open(capitals_file, "r", encoding="utf8") as file:
            rows = csv.reader(file)
            for row in rows:
                if row[0] == self.country:
                    return {"name": row[1], "lat": float(row[2]), "long": float(row[3])}
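# Country_map can likewise be exercised on its own (a sketch; the country name is
# assumed to use the same spelling as country_centers.csv, capitals.csv and
# world-countries.json):
#
#   map_data = Country_map("Albania")
#   print(map_data.get_country_center())
#   print(map_data.get_capital())
#   print(map_data.get_bounds())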
def make_profile():
    st.title("Fragile State Indicators")
    country_selection = st.selectbox("Choose a country:", sorted(countries))

    def make_map(country):
        map_init = Country_map(country)
        center = map_init.get_country_center()
        bounds = map_init.get_bounds()
        country_map = folium.Map(location=[center["lat"], center["long"]],
                                 tiles="CartoDB positron", max_bounds=True)
        # Lat/long are reversed in the JSON file, so they are swapped back here.
        sw = [bounds[0][1], bounds[0][0]]
        ne = [bounds[1][1], bounds[1][0]]
        country_map.fit_bounds([sw, ne])
        # Mark the capital with a popup.
        capital_data = map_init.get_capital()
        iframe = folium.IFrame(capital_data["name"], width=150, height=50)
        popup = folium.Popup(iframe, max_width=250)
        folium.Marker(location=[capital_data["lat"], capital_data["long"]],
                      popup=popup).add_to(country_map)
        country_map.save("map.html")

    data_init = Dashboard_data(country_selection)
    if st.button("Show Profile"):
        st.session_state["check"] = True
        metadata = data_init.metadata()
        st.write("{} (ID: {}) is a country in {}. Its income group is {}. "
                 "Its overall FSI rank for 2020 was: {}. "
                 "(Capital marked with blue marker)".format(
                     metadata["name"], metadata["id"], metadata["region"],
                     metadata["income group"], metadata["overall score"]))
        make_map(country_selection)
        with open(os.getcwd() + "/map.html", "r", encoding="utf-8") as html_file:
            source_code = html_file.read()
        components.html(source_code, height=450)
        indicators_df, _ = data_init.FSI_indicators()
        st.dataframe(indicators_df)
        st.write(data_init.income_score_change())
        st.write(data_init.regional_group_change())


if __name__ == "__main__":
    make_profile()
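# To launch the dashboard (assuming this file is saved as, e.g., fsi_dashboard.py
# inside the "Guidehouse/FSI Indicators" directory, next to FSI_data.csv,
# country_centers.csv, capitals.csv and world-countries.json):
#
#   cd "Guidehouse/FSI Indicators"
#   streamlit run fsi_dashboard.py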