COULIBALY BOURAHIMA committed on
Commit
a61aa1b
1 Parent(s): 28524a9

application

Browse files
.gcloudignore ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file specifies files that are *not* uploaded to Google Cloud
2
+ # using gcloud. It follows the same syntax as .gitignore, with the addition of
3
+ # "#!include" directives (which insert the entries of the given .gitignore-style
4
+ # file at that point).
5
+ #
6
+ # For more information, run:
7
+ # $ gcloud topic gcloudignore
8
+ #
9
+ .gcloudignore
10
+ # If you would like to upload your .git directory, .gitignore file or files
11
+ # from your .gitignore file, remove the corresponding line
12
+ # below:
13
+ .git
14
+ .gitignore
15
+
16
+ # Python pycache:
17
+ __pycache__/
18
+ # Ignored by the build system
19
+ /setup.cfg
.streamlit/config.toml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [theme]
2
+ base="dark"
3
+ font="serif"
.streamlit/secrets.toml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
# SECURITY(review): plaintext credentials are committed to version control —
# rotate them and load from an environment/secret manager instead.
# NOTE(review): this password ("...@") does not match the "Carrefour123!"
# hard-coded in App/utils/login.py — confirm which one is intended.
[api_keys]
identifiant = "rupture-gestion"
password = "Carrefour123@"
App/class_input_box/__init__.py ADDED
File without changes
App/class_input_box/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (191 Bytes). View file
 
App/class_input_box/__pycache__/input_box.cpython-311.pyc ADDED
Binary file (4.42 kB). View file
 
App/class_input_box/input_box.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import streamlit as st
3
+ from typing import Dict
4
+
5
+
class InputsBox:
    """Streamlit widget helpers that collect the dataset and run settings.

    Attributes:
        data: uploaded DataFrame (every column read as str), or None.
        columns: column names of ``data``, or None before upload.
        product_id / class_id: column names picked in the select boxes.
    """

    def __init__(self):
        self.data = None
        self.columns = None
        self.product_id = None
        self.class_id = None

    def get_data(self):
        """Upload a CSV/TSV file and load it as a string-typed DataFrame."""
        uploaded_file = st.file_uploader("Choose a CSV file", type=["csv", "tsv"])

        if uploaded_file is not None:
            try:
                self.data = pd.read_csv(uploaded_file, dtype=str)
            except Exception:
                # Fallback for tab-separated files. Rewind first: the failed
                # read above may already have consumed part of the stream.
                uploaded_file.seek(0)
                self.data = pd.read_csv(uploaded_file, sep='\t', dtype=str)

            self.columns = self.data.columns.tolist()

        return self.data

    def valid_produict_id(self) -> int:
        """Minimum length a product id must exceed to be kept.

        NOTE(review): the name keeps the original spelling ("produict")
        because callers elsewhere in this project use it.
        """
        min_len = st.number_input("Minimum len of product_id", max_value=25, min_value=1, value=2, key="pp")
        return min_len

    def valid_class_id(self) -> list:
        """Class-id first characters to exclude, entered ';'-separated."""
        valid = st.text_input("First element of No valid class_id separed by ;")
        return valid.split(";")

    def get_product_id(self) -> str:
        """Column holding the product identifier."""
        self.product_id = st.selectbox("product_id", options=self.columns, key="product_id")
        return self.product_id

    def get_class_id(self) -> str:
        """Column holding the class identifier."""
        self.class_id = st.selectbox("class_id", options=self.columns, key="class_id")
        return self.class_id

    def get_conditions(self):
        """Multi-select of the filtering conditions to apply."""
        conditions = st.multiselect(label="Conditions", options=("Number of countries", "Proportion", "Priority Countries"), key="conditions")
        return conditions

    def conditions(self):
        """Collect the three filter parameters in one row of widgets."""
        col1, col2, col3 = st.columns(3)
        conditions = {}

        with col1:
            nb_countries = st.number_input("Number of countries", min_value=1, max_value=20, value=1, key="Number of countries")
            conditions['Number of countries'] = nb_countries

        with col2:
            proportion = st.number_input("Proportion", min_value=0.10, max_value=1.00, value=0.75, key="proportion")
            conditions['Proportion'] = proportion

        with col3:
            countries = st.checkbox("Show data with ratios ", value=True, key="show_ratio_checkbox")
            conditions["Show data with ratios"] = countries

        return conditions
App/functions_rupture/__init__.py ADDED
File without changes
App/functions_rupture/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (189 Bytes). View file
 
App/functions_rupture/__pycache__/class_gestion.cpython-311.pyc ADDED
Binary file (5.7 kB). View file
 
App/functions_rupture/__pycache__/functions_gestion.cpython-311.pyc ADDED
Binary file (13.8 kB). View file
 
App/functions_rupture/functions_gestion.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pandas as pd
3
+ import streamlit as st
4
+ from App.utils.priorite_pays import *
5
+ import nltk
6
+ nltk.download('stopwords')
7
+
8
+
9
+
def valide_key(data, product_id, class_id, min_prd_id, valid_class_id):
    """Filter *data* down to usable rows.

    Keeps rows whose ``product_id`` value is strictly longer than
    *min_prd_id*, then drops rows whose ``class_id`` starts with one of the
    characters in *valid_class_id*.

    The class-id filter is best-effort: non-string class ids (or a bad
    column) leave the frame unfiltered, as in the original behaviour —
    but we now catch Exception rather than a bare ``except:`` so that
    KeyboardInterrupt/SystemExit are not swallowed.
    """
    data = data[data[product_id].str.len() > min_prd_id]
    try:
        data = data[~data[class_id].str[0].isin(valid_class_id)]
    except Exception:
        pass
    return data
17
+
18
+
def calcul_total_par_ligne(data, produit_id, class_id):
    """Cross-tabulate product vs class, keeping only products seen more than once.

    Returns a (total_by_line, matrice) pair: a DataFrame mapping each kept
    product id to its row total, and the product x class contingency table.
    """
    # First pass: row totals over every product, to find multi-occurrence ids.
    counts = pd.crosstab(data[produit_id], data[class_id]).sum(axis=1)
    multi_ids = counts[counts > 1].index.tolist()

    # Second pass: rebuild the contingency table on the kept products only.
    subset = data[data[produit_id].isin(multi_ids)]
    matrice = pd.crosstab(subset[produit_id], subset[class_id])

    row_totals = matrice.sum(axis=1)
    total_by_line = pd.DataFrame({produit_id: row_totals.index, "total_by_ligne": row_totals.values})

    return total_by_line, matrice
32
+
33
+
@st.cache_data
def matrice_creuse(matrice, produit_id, class_id):
    """Convert a contingency table to a sparse long-format DataFrame.

    Only non-zero cells are kept; the result has one row per
    (product, class) pair with its count in column "nombre".
    """
    nz = matrice.stack()
    nz = nz[nz != 0]
    return pd.DataFrame({
        produit_id: nz.index.get_level_values(0).astype(str).tolist(),
        class_id: nz.index.get_level_values(1).astype(str).tolist(),
        "nombre": nz.values.tolist(),
    })
43
+
44
+
@st.cache_data
def nouvelle_data(data, produit_id, class_id):
    """Build the enriched per-(product, class) frame and the country groups.

    Returns:
        Country: Series mapping (class_id, produit_id) to the list of
            countries carrying that pair, or None when no country column
            is present (the original code raised NameError in that case).
        merged: the sparse counts joined back onto *data*, with a
            "Proportion" column (count / row total).
    """
    total_by_line, matrice = calcul_total_par_ligne(data, produit_id, class_id)
    sparse_matrix = matrice_creuse(matrice, produit_id, class_id)

    Data = pd.merge(sparse_matrix, total_by_line, on=[produit_id])
    Data["Proportion"] = Data.nombre / Data.total_by_ligne

    merged = Data.merge(data, left_on=[class_id, produit_id], right_on=[class_id, produit_id])

    # The country column name varies by input file; probe the known aliases
    # instead of the original's triple-nested bare try/except.
    Country = None
    for col in ("Country", "COUNTRY_KEY", "COUNTRY"):
        if col in merged.columns:
            Country = merged.groupby([class_id, produit_id])[col].agg(lambda x: x.tolist())
            break

    return Country, merged
64
+
65
+
def country(produit_id, class_id, Country):
    """Look up the country list stored under the (produit_id, class_id) key."""
    key = (produit_id, class_id)
    return Country[key]
68
+
69
+
def finale_merged(merged, Country, produit_id, class_id):
    """Attach the country tuple to each row and drop duplicate pairs.

    Returns the deduplicated frame, or None if the lookup failed (a warning
    is shown in the UI in that case).
    """
    merged_finale = None
    try:
        # row[1]/row[0] are the produit_id / class_id columns of `merged`.
        merged["Countries"] = merged.apply(lambda row: country(row[1], row[0], Country), axis=1)
        # Tuples are hashable, which drop_duplicates requires.
        merged["Countries"] = merged["Countries"].apply(tuple)
        merged_finale = merged.drop_duplicates(subset=[produit_id, class_id, "Countries"])
    except Exception as e:
        st.warning(f"Une erreur s'est produite : {e}")
    return merged_finale
80
+
81
+
def cond_pays_proportion(merged_finale, nb_pays, proportion, produit_id):
    """Keep all rows of every product that has at least one qualifying row.

    A row qualifies when its Proportion is >= *proportion* AND its country
    count (total_by_ligne) is >= *nb_pays*.
    """
    qualifying = (merged_finale.Proportion >= proportion) & (merged_finale.total_by_ligne >= nb_pays)
    cles = merged_finale.loc[qualifying, produit_id].unique()
    return merged_finale[merged_finale[produit_id].isin(cles)]
88
+
89
+
def cond_pays_priorite(merged_finale, produit_id):
    """Isolate products split exactly 50/50 between classes and weight them.

    Returns (df, df_equa, df_nequa): all tied rows, the rows whose country
    weight ("Poids", from the star-imported `dico`) is itself tied, and the
    rows with a clear weight winner.
    """
    tied = (merged_finale.Proportion == 0.5) & (merged_finale.total_by_ligne >= 2)
    keys = merged_finale.loc[tied, produit_id].unique()
    df = merged_finale[merged_finale[produit_id].isin(keys)]

    # Sum of country priority weights for each row's country tuple.
    df.loc[:, "Poids"] = df["Countries"].apply(lambda pays: np.sum([dico[p] for p in pays]))

    # Rows sharing (product, weight) cannot be decided by weight alone.
    equal_weight = df.duplicated(subset=[produit_id, "Poids"], keep=False)
    df_equa = df[equal_weight]
    df_nequa = df[~df.isin(df_equa)].dropna()

    return df, df_equa, df_nequa
104
+
105
+
106
+
107
+ import pandas as pd
108
+
def data_1_1(df_nequa, produit_id, class_id):
    """Resolve multi-country ties by the heaviest row, then explode countries.

    For each product seen in more than one country, every row is reassigned
    the class (and its description column) of the row with the highest
    "Poids"; each country of the tuple then becomes its own row and the
    "Countries" column is renamed to "Country".
    """
    desc_col = f'{class_id[:-4]}_DESC_FR'

    multi = df_nequa[df_nequa.Countries.apply(lambda c: len(c) > 1)]
    best_idx = multi.groupby(produit_id)['Poids'].idxmax()

    # Propagate the winning class/description to every row of the product.
    for code in multi[produit_id].unique().tolist():
        multi.loc[multi[produit_id] == code, class_id] = multi.loc[best_idx[code], class_id]
        multi.loc[multi[produit_id] == code, desc_col] = multi.loc[best_idx[code], desc_col]

    # One output row per country of each tuple.
    expanded = multi.copy()
    expanded.Countries = expanded.Countries.apply(lambda c: ','.join(c))
    exploded_rows = []
    for _, row in expanded.iterrows():
        for pays in row['Countries'].split(','):
            dup = row.copy()
            dup['Countries'] = pays
            exploded_rows.append(dup)

    new_df = pd.DataFrame(exploded_rows).drop_duplicates()
    try:
        new_df = new_df.rename(columns={'Countries': 'Country'})
    except Exception:
        pass

    return new_df
136
+
137
+
138
+
139
+
def finale_merge(data, new_data, produit_id, class_id):
    """Apply corrected class assignments and build a before/after change log.

    Returns (data_finale, df_finale): the change log (rows flagged "Avant"
    for the original state and "Après" for the corrected state) and the
    fully corrected frame. The country column is hard-wired to COUNTRY_KEY.
    """
    desc_col = f"{class_id[:-4]}_DESC_FR"

    merged_df = pd.merge(data, new_data, on=["COUNTRY_KEY", produit_id], how="left", suffixes=("", "_y"))
    # Prefer the corrected value; fall back to the original when untouched.
    merged_df[class_id] = merged_df[f"{class_id}_y"].fillna(merged_df[class_id])
    merged_df[desc_col] = merged_df[f"{desc_col}_y"].fillna(merged_df[desc_col])

    df_finale = merged_df[[produit_id, "COUNTRY_KEY", class_id, desc_col]]

    # Outer merge with indicator: rows present only on one side changed.
    changes = pd.merge(data, df_finale, how='outer', indicator=True)
    data_finale = changes[changes['_merge'] != 'both']
    data_finale = data_finale.rename(columns={'_merge': 'Changements'})
    data_finale.sort_values(by=[produit_id], ascending=True, inplace=True)
    data_finale["Changements"] = data_finale["Changements"].apply(lambda flag: "Avant" if flag == "left_only" else "Après")

    data_finale = data_finale[[produit_id, "COUNTRY_KEY", class_id, desc_col, "Changements"]]
    data_finale.drop_duplicates(inplace=True)

    return data_finale, df_finale
162
+
163
+
164
+
def data_1_FR(df, produit_id, class_id):
    """For French rows, copy the class of the non-"R" item onto its "R" twin.

    Considers products with exactly two item_key values; whichever item_key
    contains "R" receives the class id and description of the other one.
    Returns the (modified) FRA-only slice.

    NOTE(review): the description column is derived with ``class_id[:-3]``
    here but ``class_id[:-4]`` everywhere else in this module — confirm
    which suffix length is correct.
    """
    df_f_f = df[df.Country == "FRA"]
    desc_col = f'{class_id[:-3]}_DESC_FR'

    Barcodes = []  # kept for parity with original (collected, not returned)
    for barcode in df_f_f[produit_id].unique():
        items = df_f_f.item_key[df_f_f[produit_id] == barcode].tolist()
        if len(items) == 2:
            Barcodes.append(barcode)
            sel = df_f_f[produit_id] == barcode
            if "R" in items[0]:
                df_f_f.loc[sel & (df_f_f.item_key == items[0]), class_id] = df_f_f.loc[sel & (df_f_f.item_key == items[1]), class_id].values
                df_f_f.loc[sel & (df_f_f.item_key == items[0]), desc_col] = df_f_f.loc[sel & (df_f_f.item_key == items[1]), desc_col].values
            if "R" in items[1]:
                df_f_f.loc[sel & (df_f_f.item_key == items[1]), class_id] = df_f_f.loc[sel & (df_f_f.item_key == items[0]), class_id].values
                df_f_f.loc[sel & (df_f_f.item_key == items[1]), desc_col] = df_f_f.loc[sel & (df_f_f.item_key == items[0]), desc_col].values
    return df_f_f
App/utils/__pycache__/divers_function.cpython-311.pyc ADDED
Binary file (2.39 kB). View file
 
App/utils/__pycache__/login.cpython-311.pyc ADDED
Binary file (7.98 kB). View file
 
App/utils/__pycache__/priorite_pays.cpython-311.pyc ADDED
Binary file (625 Bytes). View file
 
App/utils/divers_function.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import pandas as pd
3
+
@st.cache_data
def convert_df(df):
    """Serialize *df* to UTF-8 CSV bytes for a download button (cached)."""
    csv_text = df.to_csv()
    return csv_text.encode('utf-8')
7
+
@st.cache_data
def supprime_country(df):
    """Drop the first country column found among the known aliases, in place.

    Replaces the original triple-nested bare try/except with an explicit
    column check; at most one column is removed, matching the old behaviour.

    NOTE(review): mutating the argument under st.cache_data is fragile —
    the cached object is shared; consider returning a copy instead.
    """
    for col in ("Country", "COUNTRY_KEY", "COUNTRY"):
        if col in df.columns:
            df.drop([col], axis=1, inplace=True)
            break
    return df
21
+
22
+
23
+
24
+
def Merger(df, data_tr, produit_id, class_id):
    """Join proposed class changes onto the original rows and keep real changes.

    Only products present in *data_tr* are considered; rows whose old class
    equals the proposed class are dropped. Column layout of the result is
    fixed (COUNTRY_KEY, old/new class and description, counts, ratios).
    """
    keys = data_tr[produit_id].unique()
    subset = df[df[produit_id].isin(keys)]

    # Preserve the pre-change class for the comparison below.
    subset.loc[:, f'old_{class_id}'] = subset.loc[:, class_id]

    merged_df = pd.merge(subset, data_tr, on=[produit_id], how='inner', indicator=True)
    merged_df[class_id] = merged_df[f'{class_id}_x'].fillna(merged_df[f'old_{class_id}'])

    # Keep only the rows where the proposal actually differs.
    changed = merged_df[merged_df[f'old_{class_id}'] != merged_df[f'{class_id}_y']]

    wanted = ["COUNTRY_KEY", "ITEM_DESC_x", f"old_{class_id}", f'{class_id[:-4]}_DESC_FR_x',
              f'{class_id}_y', f'{class_id[:-4]}_DESC_FR_y', "ITEM_DESC_y",
              "nombre", "total_by_ligne", "Proportion", "Countries"]
    return changed[wanted]
39
+
App/utils/login.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import bcrypt
3
+ import streamlit as st
4
+ import pandas as pd
5
+ from App.class_input_box.input_box import *
6
+ from App.functions_rupture.functions_gestion import *
7
+ from App.utils.divers_function import *
8
+ from App.utils.login import *
9
+
10
+ from streamlit_extras.chart_container import chart_container
11
+
12
+
def hash_password(password):
    """Return a bcrypt hash of *password* using a freshly generated salt."""
    return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
17
+
def validate_login(email, password):
    """Check the credentials and persist the result in the session.

    Fixes a NameError in the original: ``SessionState`` is not defined
    (it was a removed third-party Streamlit helper); modern Streamlit
    provides ``st.session_state`` instead.

    NOTE(review): the password here ("Carrefour123!") differs from the one
    in .streamlit/secrets.toml ("Carrefour123@") — confirm which is right,
    and prefer reading from st.secrets over hard-coding.
    """
    stored_email = "rupture-gestion"  # Example stored email
    stored_password = hash_password("Carrefour123!")  # Example hashed password

    if email == stored_email and bcrypt.checkpw(password.encode('utf-8'), stored_password):
        st.session_state["is_authenticated"] = True

    return st.session_state.get("is_authenticated", False)
27
+
def login_page(state):
    """Render the login widgets and return the authentication status.

    Validates only when the Login button was pressed this rerun; otherwise
    returns the status already held by *state*.
    """
    email = st.text_input("Email")
    password = st.text_input("Password", type="password")

    if st.button('Login'):
        return validate_login(email, password)
    return state.is_authenticated
38
+
39
+
# NOTE(review): st.cache is deprecated/removed in current Streamlit, and caching a
# function that renders widgets would skip the whole UI on cache hits — the
# decorator should almost certainly be removed.
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def main_() :
    """Entry point: authenticate the user, then drive the rupture-management UI."""
    # NOTE(review): login_page is defined as login_page(state) and returns a single
    # boolean — calling it with no argument and unpacking three values will raise
    # at runtime (the error is then hidden by the bare except below).
    email, password, login = login_page()

    if login:
        if validate_login(email, password):
            st.sidebar.success("Login successful!")

            st.title("Gestion des ruptures ")

            input_box = InputsBox()
            data = input_box.get_data()

            try:
                if data.shape[0] != 0 :
                    st.header("Data")

                    st.dataframe(data)

                    # Bare string literals are rendered as markdown by Streamlit "magic".
                    "## Filters"

                    col1, col2 = st.columns(2)

                    with col1 :
                        product_id = input_box.get_product_id()

                    with col2 :
                        class_id = input_box.get_class_id()

                    col1, col2 = st.columns(2)

                    with col1 :
                        min_product_id = input_box.valid_produict_id()

                    with col2 :
                        vaind_class_id = input_box.valid_class_id()

                    conditions = input_box.conditions()

                    if st.button("RUN ", key="run_button"):
                        # Drop too-short product ids and excluded class prefixes.
                        data = valide_key(data, product_id, class_id, min_product_id, vaind_class_id )
                        Country, merged = nouvelle_data(data,
                                                        str(product_id),
                                                        str(class_id))

                        merged_final = finale_merged(merged,
                                                     Country,
                                                     product_id,
                                                     class_id)

                        if conditions["Show data with ratios"]:
                            st.subheader("Show data with ratios")
                            st.dataframe(merged_final)

                            csv = convert_df(merged_final)
                            st.download_button(label="Download data as CSV",
                                               data=csv,
                                               file_name='sample_df.csv',
                                               mime='text/csv',)

                        data_countries_ratio = cond_pays_proportion(merged_final,
                                                                    conditions["Number of countries"],
                                                                    conditions["Proportion"],
                                                                    product_id)

                        df = supprime_country(data_countries_ratio)
                        csv = convert_df(df)

                        """## The data below is filtered as follows: """
                        "- Number of countries greater than or equal to ", conditions["Number of countries"]
                        "- The proportion with the highest ", class_id ," is greater than or equal to ",conditions["Proportion"]

                        finale_df = Merger(data,
                                           df,
                                           product_id,
                                           class_id)

                        tab1, tab2 = st.tabs(["Data without decision-making", "Data with proposed changes"])

                        with tab1 :
                            st.subheader("Data without decision-making")
                            st.dataframe(df)
                            st.download_button(label="Download data as CSV",
                                               data=csv,
                                               file_name='sample_df.csv',
                                               mime='text/csv',)

                        with tab2 :
                            st.subheader("Data with proposed changes")
                            st.dataframe(finale_df)
                            csv_f = convert_df(finale_df)
                            st.download_button(label="Download data as CSV",
                                               data=csv_f,
                                               file_name='sample_df.csv',
                                               mime='text/csv',)

                        "## Country priority "

                        # NOTE(review): cond_pays_priorite returns a 3-tuple
                        # (df, df_equa, df_nequa); passing the tuple straight to
                        # st.dataframe/convert_df below looks unintended.
                        priority_data = cond_pays_priorite(merged_final, product_id)

                        tab1, tab2 = st.tabs(["Data without decision-making", "Data with proposed changes"])

                        with tab1 :
                            st.subheader("Data without decision-making")
                            st.dataframe(priority_data)
                            csv_f = convert_df(priority_data)
                            st.download_button(label="Download data as CSV",
                                               data=csv_f,
                                               file_name='sample_df.csv',
                                               mime='text/csv',)

                        with tab2 :
                            "to do"

            # NOTE(review): bare except hides every failure (including the
            # login_page arity bug above); narrow it and surface the exception.
            # The `pass` is redundant — st.write still runs after it.
            except:
                pass
                st.write("An error occured. Please check your inputs.")

        else:
            st.error("Identifiant ou mot de passe incorrect !")
161
+
162
+ if __name__ == "__main__":
163
+ main_()
App/utils/priorite_pays.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
# Country priority table: position in the list encodes priority
# (earlier = more important).
pays_all = ["FRA", "BEL" ,"ESP","ITA", "BRA","ATA","ARG", "POL", "ROU", "BIG","SAM",
"UAE","SAU","KWT","OMN","BHR","QAT","JOR","EGY","ARM","UZB","IRN","KEN","GEO","LEB","UGA","PAK","IRQ",
"MTQ","GLP","REU","GUA","MTS","GLS","GUF","MTA","GLA","GUS","SXM","DOM",
"MAR","AMA","TUN","DZA","TUR","IAP","IET","TWN"]

# Weight per country: the first entry gets len(pays_all), the last gets 1.
# A dict comprehension with enumerate replaces the original O(n^2)
# list.index() lookup; results are identical since all entries are unique.
dico = {pays: len(pays_all) - rang for rang, pays in enumerate(pays_all)}
Dockerfile ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
FROM python:3.11

# Expose port you want your app on
EXPOSE 8080

# Upgrade pip and install requirements
COPY requirements.txt requirements.txt
RUN pip install -U pip
RUN pip install -r requirements.txt

# Set working directory BEFORE copying so main.py lands where streamlit
# runs (the original copied into / and then switched to /rupture, leaving
# the workdir empty).
WORKDIR /rupture
COPY . ./

# Run. The original used en-dashes ("–server.port"), which streamlit treats
# as positional arguments and silently ignores; they must be "--" flags.
ENTRYPOINT ["streamlit", "run", "main.py", "--server.port=8080", "--server.address=0.0.0.0"]
LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
RUPTURE.egg-info/PKG-INFO ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: RUPTURE
3
+ Version: 0.0.0
4
+ License-File: LICENSE
RUPTURE.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ LICENSE
2
+ README.md
3
+ setup.py
4
+ RUPTURE.egg-info/PKG-INFO
5
+ RUPTURE.egg-info/SOURCES.txt
6
+ RUPTURE.egg-info/dependency_links.txt
7
+ RUPTURE.egg-info/top_level.txt
RUPTURE.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
 
 
1
+
RUPTURE.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+
app.yaml ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ runtime: python310
2
+ service: default
essaie.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import pandas as pd
import time
from App.class_input_box.input_box import *
from App.functions_rupture.functions_gestion import *
from App.utils.divers_function import *
from App.utils.login import *


def app():
    """Render the rupture-management Streamlit page.

    Flow: upload a CSV via InputsBox, pick the product/class columns and
    validity filters, then on RUN compute the ratio, proportion and
    country-priority views; each view is editable (st.data_editor) and
    downloadable as CSV.
    """
    st.title("Gestion des ruptures ")

    input_box = InputsBox()
    data = input_box.get_data()

    # InputsBox.get_data() yields no data until a file is uploaded.  The
    # original code relied on a bare `except:` to absorb the resulting
    # AttributeError, which both hid every genuine failure and displayed
    # the error banner on first page load.  Guard explicitly instead.
    if data is None or data.shape[0] == 0:
        return

    try:
        st.header("Data")
        st.dataframe(data)

        "## Parameters"
        col1, col2 = st.columns(2)
        with col1:
            product_id = input_box.get_product_id()
        with col2:
            class_id = input_box.get_class_id()

        '## Filters'
        col1, col2 = st.columns(2)
        with col1:
            min_product_id = input_box.valid_produict_id()
        with col2:
            vaind_class_id = input_box.valid_class_id()

        conditions = input_box.conditions()

        if st.button("RUN ", key="run_button"):
            # Validate the key columns, then build the merged/ratio table.
            data = valide_key(data, product_id, class_id, min_product_id, vaind_class_id)
            Country, merged = nouvelle_data(data, str(product_id), str(class_id))
            merged_final = finale_merged(merged, Country, product_id, class_id)

            if conditions["Show data with ratios"]:
                st.subheader("Show data with ratios")
                merged_final.loc[:, "Evaluation"] = True
                merged_final = st.data_editor(merged_final)

                csv = convert_df(merged_final)
                st.download_button(label="Download data as CSV",
                                   data=csv,
                                   file_name='sample_df.csv',
                                   mime='text/csv',
                                   key="dl_ratios")

            data_countries_ratio = cond_pays_proportion(merged_final,
                                                        conditions["Number of countries"],
                                                        conditions["Proportion"],
                                                        product_id)

            df = supprime_country(data_countries_ratio)
            csv = convert_df(df)

            """## The data below is filtered as follows: """
            "- Number of countries greater than or equal to ", conditions["Number of countries"]
            "- The proportion with the highest ", class_id, " is greater than or equal to ", conditions["Proportion"]

            # Keep only rows meeting both user thresholds before merging back.
            DF = df[(df.Proportion >= conditions["Proportion"])
                    & (df.total_by_ligne >= conditions["Number of countries"])]

            finale_df = Merger(data, DF, product_id, class_id)

            tab1, tab2 = st.tabs(["Data without decision-making", "Data with proposed changes"])

            with tab1:
                st.subheader("Data without decision-making")
                df.loc[:, "Evaluation"] = True
                df = st.data_editor(df)
                st.download_button(label="Download data as CSV",
                                   data=csv,
                                   file_name='sample_df.csv',
                                   mime='text/csv',
                                   key="dl_no_decision")

            with tab2:
                st.subheader("Data with proposed changes")
                finale_df.loc[:, "Evaluation"] = True
                finale_df = st.data_editor(finale_df)
                csv_f = convert_df(finale_df)
                st.download_button(label="Download data as CSV",
                                   data=csv_f,
                                   file_name='sample_df.csv',
                                   mime='text/csv',
                                   key="dl_proposed")

            "## Country priority "

            priority_data, df_equa, df_nequa = cond_pays_priorite(merged_final, product_id)
            tab1, tab2, tab3, tab4 = st.tabs(["Data without decision-making",
                                              "Equality case and mt1",
                                              "Cases of inequality",
                                              "Data with proposed changes mt2"])

            with tab1:
                st.subheader("Data without decision-making")
                priority_data.loc[:, "Evaluation"] = True
                priority_data = st.data_editor(priority_data)
                csv_f = convert_df(priority_data)
                st.download_button(label="Download data as CSV",
                                   data=csv_f,
                                   file_name='sample_df.csv',
                                   mime='text/csv',
                                   key="dl_priority")

            with tab2:
                st.subheader("Equality case")
                df_equa.loc[:, "Evaluation"] = True
                df_equa = st.data_editor(df_equa)
                csv_f = convert_df(df_equa)
                st.download_button(label="Download data as CSV",
                                   data=csv_f,
                                   file_name='sample_df.csv',
                                   mime='text/csv',
                                   key="dl_equality")

            with tab3:
                st.subheader("Cases of inequality")
                # Inequality rows spread over more than two countries.
                df_nequa_ = df_nequa[df_nequa.total_by_ligne.apply(lambda x: int(x) > 2)]
                df_nequa_.loc[:, "Evaluation"] = True
                df_nequa_ = st.data_editor(df_nequa_)

                csv_f = convert_df(df_nequa_)
                st.download_button(label="Download data as CSV",
                                   data=csv_f,
                                   file_name='sample_df.csv',
                                   mime='text/csv',
                                   key="dl_inequality")

            # For each barcode keep the row with the highest weight, then merge
            # the winners back onto the raw data.  Non-inplace drop avoids the
            # SettingWithCopy hazard of the original `inplace=True` on a .loc
            # slice.
            max_poids_index = df_nequa_.groupby('BARCODE')['Poids'].idxmax()
            df_max_poids = df_nequa_.loc[max_poids_index].drop(columns=["COUNTRY_KEY"])
            finale_df_ = Merger(data, df_max_poids, product_id, class_id)

            with tab4:
                st.subheader("Cases of inequality")
                finale_df_.loc[:, "Evaluation"] = True
                finale_df_ = st.data_editor(finale_df_)

                csv_f = convert_df(finale_df_)
                st.download_button(label="Download data as CSV",
                                   data=csv_f,
                                   file_name='sample_df.csv',
                                   mime='text/csv',
                                   key="dl_mt2")

            st.subheader(" One vs One with similarity score")
            # Exactly two competing countries: keep the heavier of the pair.
            df_nequa_1 = df_nequa[df_nequa.total_by_ligne.apply(lambda x: int(x) == 2)]
            max_poids_index1 = df_nequa_1.groupby('BARCODE')['Poids'].idxmax()
            df_max_poids1 = df_nequa_1.loc[max_poids_index1].drop(columns=["COUNTRY_KEY"])
            finale_df_1 = Merger(data, df_max_poids1, product_id, class_id)
            finale_df_1.loc[:, "Evaluation"] = True
            finale_df_1 = st.data_editor(finale_df_1)

            st.success('Done!', icon="✅")
            st.balloons()

    except Exception:
        # Previously a bare `except:` followed by a dead `pass`; it swallowed
        # every exception type with no hint of the cause.  Keep the banner but
        # only for real processing errors.
        st.error('This is an error', icon="🚨")
        st.info('Ensure that column names are capitalized and that product_id and class_id descriptions are present, as well as a country column.', icon="ℹ️")


if __name__ == "__main__":
    # NOTE(review): personal toast messages left unchanged; consider removing
    # them before shipping.
    st.toast("Je t'aime Kadi Jolie ")
    time.sleep(.5)
    st.toast('I love you Kadi my wife')
    time.sleep(.5)
    st.toast('Holà!', icon='🎉')
    app()
logo.png ADDED
main.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import pandas as pd
from App.class_input_box.input_box import *
from App.functions_rupture.functions_gestion import *
from App.utils.divers_function import *
from App.utils.login import *

from streamlit_extras.chart_container import chart_container


st.title("Gestion des ruptures ")

input_box = InputsBox()
data = input_box.get_data()

# get_data() yields no data until a file is uploaded.  The original bare
# `except:` absorbed the resulting AttributeError, which meant the "error"
# message was shown on first page load before any file was chosen, and every
# real failure was hidden.  Guard explicitly instead.
if data is not None and data.shape[0] != 0:
    try:
        st.header("Data")
        st.dataframe(data)

        "## Filters"

        col1, col2 = st.columns(2)
        with col1:
            product_id = input_box.get_product_id()
        with col2:
            class_id = input_box.get_class_id()

        col1, col2 = st.columns(2)
        with col1:
            min_product_id = input_box.valid_produict_id()
        with col2:
            vaind_class_id = input_box.valid_class_id()

        conditions = input_box.conditions()

        if st.button("RUN ", key="run_button"):
            # Validate key columns, then build the merged/ratio table.
            data = valide_key(data, product_id, class_id, min_product_id, vaind_class_id)
            Country, merged = nouvelle_data(data, str(product_id), str(class_id))
            merged_final = finale_merged(merged, Country, product_id, class_id)

            if conditions["Show data with ratios"]:
                st.subheader("Show data with ratios")
                st.dataframe(merged_final)

                csv = convert_df(merged_final)
                st.download_button(label="Download data as CSV",
                                   data=csv,
                                   file_name='sample_df.csv',
                                   mime='text/csv',
                                   key="dl_ratios")

            data_countries_ratio = cond_pays_proportion(merged_final,
                                                        conditions["Number of countries"],
                                                        conditions["Proportion"],
                                                        product_id)

            df = supprime_country(data_countries_ratio)
            csv = convert_df(df)

            """## The data below is filtered as follows: """
            "- Number of countries greater than or equal to ", conditions["Number of countries"]
            "- The proportion with the highest ", class_id, " is greater than or equal to ", conditions["Proportion"]

            finale_df = Merger(data, df, product_id, class_id)

            tab1, tab2 = st.tabs(["Data without decision-making", "Data with proposed changes"])

            with tab1:
                st.subheader("Data without decision-making")
                st.dataframe(df)
                st.download_button(label="Download data as CSV",
                                   data=csv,
                                   file_name='sample_df.csv',
                                   mime='text/csv',
                                   key="dl_no_decision")

            with tab2:
                st.subheader("Data with proposed changes")
                st.dataframe(finale_df)
                csv_f = convert_df(finale_df)
                st.download_button(label="Download data as CSV",
                                   data=csv_f,
                                   file_name='sample_df.csv',
                                   mime='text/csv',
                                   key="dl_proposed")

            "## Country priority "

            # NOTE(review): essaie.py unpacks three values from
            # cond_pays_priorite; a single value is kept here — confirm which
            # signature is current before relying on this script.
            priority_data = cond_pays_priorite(merged_final, product_id)

            tab1, tab2 = st.tabs(["Data without decision-making", "Data with proposed changes"])

            with tab1:
                st.subheader("Data without decision-making")
                st.dataframe(priority_data)
                csv_f = convert_df(priority_data)
                st.download_button(label="Download data as CSV",
                                   data=csv_f,
                                   file_name='sample_df.csv',
                                   mime='text/csv',
                                   key="dl_priority")

            with tab2:
                "to do"

    except Exception:
        # Was `except: pass` followed by the message; the bare except hid the
        # actual error.  Typo "occured" fixed as well.
        st.write("An error occurred. Please check your inputs.")
query ADDED
@@ -0,0 +1 @@
 
 
1
+ docker
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ numpy==1.23.4
2
+ pandas==1.3.4
3
+ setuptools==65.6.3
4
+ streamlit==1.25.0
5
+ streamlit_extras==0.3.0
6
+ gunicorn
7
+ nltk
8
+ bcrypt
rupture ADDED
@@ -0,0 +1 @@
 
 
1
+ Subproject commit a552d499acb5c1c855daf3e23e8d3582ec467f09
setup.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
"""Packaging entry point for the RUPTURE application.

find_packages() picks up the App/ package tree; metadata (name only, version
defaults to 0.0.0 as reflected in RUPTURE.egg-info/PKG-INFO).
"""
from setuptools import setup, find_packages

setup(name="RUPTURE", packages=find_packages())
start ADDED
@@ -0,0 +1 @@
 
 
1
+ docker
stop ADDED
@@ -0,0 +1 @@
 
 
1
+ docker