Spaces:
Sleeping
Sleeping
Upload 3 files
Browse files- app.py +440 -0
- magna.png +0 -0
- requirements.txt +7 -0
app.py
ADDED
@@ -0,0 +1,440 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 27 17:45:59 2023
env: dardos2
@author: forti

Streamlit app that forecasts weekly product sales from an uploaded Excel file:
- Plots training and validation sets to compare forecast accuracy
- Plots with plotly (matplotlib variants are kept commented out)
- Builds df_reindexed resampled to a weekly period
- Computes sigma (forecast error std) and the resulting safety stock
- Can compute a MOVING AVERAGE (uncomment the relevant lines)
"""
|
14 |
+
|
15 |
+
import numpy as np
|
16 |
+
import pandas as pd
|
17 |
+
# import matplotlib.pyplot as plt
|
18 |
+
import plotly.io as pio
|
19 |
+
import plotly.express as px
|
20 |
+
from darts import TimeSeries
|
21 |
+
from darts.models import RandomForest
|
22 |
+
# from darts.models import NaiveDrift
|
23 |
+
# from darts.models import MovingAverageFilter
|
24 |
+
import streamlit as st
|
25 |
+
# import locale
|
26 |
+
from darts.metrics import mae, rmse
|
27 |
+
from PIL import Image
|
28 |
+
|
29 |
+
|
30 |
+
###########################################################################
|
31 |
+
@st.cache_data
def convert_df(df_new):
    """Serialize *df_new* to CSV and return it as UTF-8 bytes.

    Cached with st.cache_data so the conversion is not redone on every
    Streamlit rerun; the bytes feed st.download_button directly.
    """
    csv_text = df_new.to_csv()
    return csv_text.encode('utf-8')
|
35 |
+
###########################################################################
|
36 |
+
|
37 |
+
|
38 |
+
# Default Plotly renderer for figures shown outside Streamlit (e.g. local debugging).
pio.renderers.default = "browser"

st.title(" 🏪 Super Playitas")
st.header("🤖 Pronósticos con Inteligencia Artificial 🤖")
df = None
# Expects an Excel export of the product's sales history (point-of-sale report).
uploaded_file = st.file_uploader("Seleccione su producto", type='xlsx')

if uploaded_file is not None:
    df = pd.read_excel(uploaded_file)
    # st.write(data)
|
48 |
+
|
49 |
+
if df is not None:

    ################ Data cleaning ###########################
    # The POS export has 6 junk header rows; row 6 holds the real column names.
    # df = pd.read_excel('marlboro-20.xlsx')
    # df = pd.read_excel('cocacola-600.xlsx')
    df.drop([0, 1, 2, 3, 4, 5], inplace=True)
    df.columns = df.iloc[0]
    df.drop(6, inplace=True)
    ###########################################################

    # Set to Spanish or English locale
    #locale.setlocale(locale.LC_TIME, "en_US.UTF-8")
    # locale.setlocale(locale.LC_TIME, "es_US.UTF-8")

    # Create a dictionary mapping Spanish month abbreviations to English
    # so pd.to_datetime can parse '%b' without changing the process locale.
    month_dict = {
        'Ene': 'Jan',
        'Feb': 'Feb',
        'Mar': 'Mar',
        'Abr': 'Apr',
        'May': 'May',
        'Jun': 'Jun',
        'Jul': 'Jul',
        'Ago': 'Aug',
        'Sep': 'Sep',
        'Oct': 'Oct',
        'Nov': 'Nov',
        'Dic': 'Dec'
    }

    # Assume df is your DataFrame and 'Fecha' is the column with dates
    df['Fecha'] = df['Fecha'].replace(month_dict, regex=True)

    # convert the date column into datetime (unparseable values become NaT)
    df['Fecha'] = pd.to_datetime(df['Fecha'], format='%d/%b/%Y %I:%M %p', errors='coerce')

    # rename columns by position (column labels in the export are unreliable)
    df.columns.values[2] = 'Cantidad-1'
    df.columns.values[5] = 'Cantidad-2'
    df.columns.values[8] = 'Cantidad-3'
    df['Cantidad-2'] = pd.to_numeric(df['Cantidad-2'], errors='coerce')

    # read the last value of the 'Cantidad-3' column: current net inventory
    df['Cantidad-3'] = pd.to_numeric(df['Cantidad-3'], errors='coerce')
    inventario_neto = df['Cantidad-3'].iloc[-1]

    # drop NaN rows from column "Cantidad-2"
    df.dropna(subset=['Cantidad-2'], inplace=True)

    # drop negative values from "Cantidad-2" (returns/corrections are not sales)
    df = df[df['Cantidad-2'] >= 0]

    # Create a new column 'Fecha_sin_hora' with just the date (no time component)
    df['Fecha_sin_hora'] = df['Fecha'].dt.date

    # Group by 'Fecha_sin_hora' and sum 'Cantidad-2' -> daily sales totals
    df_sum = df.groupby('Fecha_sin_hora')['Cantidad-2'].sum().reset_index()

    ###############################################################
    # # Create a new column 'Mes' with just the month and year
    # df['Mes'] = df['Fecha_sin_hora'].dt.to_period('M')

    # # Group by 'Mes' and sum 'Cantidad-2'
    # df_sum = df.groupby('Mes')['Cantidad-2'].sum().reset_index()

    ###############################################################

    # df_sum.drop(737, inplace=True)
    # df_sum = df_sum.iloc[699:800]

    ################################################################
    ######## Create a DataFrame with a new date range ############################

    # Create a continuous daily date range spanning the observed sales history,
    # so days with no transactions can be filled in as zero-sales days.
    # new_date_range = pd.date_range(start='2023-03-12', end='2023-08-11')
    new_date_range = pd.date_range(start=df_sum.iloc[0,0], end=df_sum.iloc[-1,0])

    # Convert the date range to a DataFrame
    #df_3 = pd.DataFrame({'Fecha': new_date_range})
    ##################################################################

    df_sum.set_index('Fecha_sin_hora', inplace=True)

    # Re-index onto the continuous daily range (missing days become NaN)
    #df_reindexed = df_sum.apply(lambda x: x.set_index('Fecha_sin_hora').reindex(new_date_range))
    df_reindexed = df_sum.apply(lambda x: x.reindex(new_date_range))

    # Reset the index
    #df_reindexed = df_reindexed.reset_index(level=0, drop=True).reset_index()

    # Fill missing sales values with 0
    df_reindexed['Cantidad-2'] = df_reindexed['Cantidad-2'].fillna(0)

    #################################################################
    # Create a new column 'Semana' and aggregate daily sales to weekly totals

    df_reindexed.reset_index(inplace=True)
    df_reindexed = df_reindexed.rename(columns={'index': 'Fecha'})
    df_reindexed['Semana'] = df_reindexed['Fecha'].dt.to_period('W')

    # Group by 'Semana' and sum 'Cantidad-2'
    df_reindexed = df_reindexed.groupby('Semana')['Cantidad-2'].sum().reset_index()
    # rename columns
    df_reindexed.columns.values[0] = 'Fecha'

    # ax = df_reindexed.plot(label='Historial')
    # plt.show()

    # Make 'Fecha' a plain string; a weekly Period renders as
    # 'start/end', so keep only the week-start date before the '/'.
    # df_reindexed.set_index('Fecha', inplace=True)
    df_reindexed['Fecha'] = df_reindexed['Fecha'].astype(str)
    df_reindexed['Fecha'] = df_reindexed['Fecha'].str.split('/').str[0]

    # use for forecasting the last two weeks; automated date
    # dt = pd.to_datetime(df_sum.index[1])
    # dt = dt + pd.Timedelta(days=1)
    #################################################################

    # Convert datetime to string to be able to use DARTS
    #df_sum['Fecha_sin_hora'] = df_sum['Fecha_sin_hora'].astype(str)

    ########## Reset the index. Use only for daily df_reindexed ##################
    # df_reindexed['Fecha'] = df_reindexed.index
    # df_reindexed.reset_index(drop=True, inplace=True)
    # df_reindexed = df_reindexed[['Fecha', 'Cantidad-2']]

    # Create a darts TimeSeries, specifying the time and value columns
    series = TimeSeries.from_dataframe(df_reindexed, "Fecha", 'Cantidad-2')
    # series = TimeSeries.from_dataframe(df_reindexed, freq="W")

    # Assuming df_reindexed is your DataFrame and it's indexed by date
    # series = TimeSeries.from_times_and_values(times=df_reindexed, values=df_reindexed['Cantidad-2'])

    # Set aside the last 15 weeks as a validation series
    train, val = series[:-15], series[-15:]

    #################### Train Forecasting Models ######################
    # Train baseline model
    # model_1 = NaiveDrift()
    # model_1.fit(series)

    # Train ML model: model_2 on the full history (production forecast),
    # model_2a on train only (to measure error against the validation set).
    model_2 = RandomForest(lags=20) #25 #55
    model_2a = RandomForest(lags=20)
    # model_2.fit(train)
    model_2.fit(series)
    model_2a.fit(train)

    # MOVING AVERAGE
    # model_3 = MovingAverageFilter(window=15)
    # filtered_series = model_3.filter(series)
    ####################################################################

    ################## Predict with Forecasting Models #################
    # Predict using baseline model
    # prediction_1 = model_1.predict(15)

    # Predict using RandomForest: 15 weeks ahead from each model
    # prediction_2 = model_2.predict(len(val))
    prediction_2 = model_2.predict(15)
    prediction_2a = model_2a.predict(15)
    # df_4 = pd.DataFrame(prediction_2.pd_dataframe())
    # df_4 = df_4.reset_index(drop=True)
    ###################################################################

    ######## plot forecast from validation set ########################

    # converts darts TimeSeries objects to pandas dataframes
    series_df = series.pd_dataframe()
    prediction_2_df = prediction_2.pd_dataframe()
    prediction_2a_df = prediction_2a.pd_dataframe()
    val_df = val.pd_dataframe()

    # series.plot(label="Historial de Ventas")
    # prediction_2a.plot(label="Pronóstico", low_quantile=0.05, high_quantile=0.95)

    # filtered_series.plot(label="Historial de Ventas")
    # series.plot(label="Historial de Ventas")

    # plt.xlabel('Fecha')
    # plt.ylabel('Ventas')
    # plt.legend()
    # plt.subplots_adjust(bottom=0.2)
    # plt.savefig("producto_forecast_2.png", dpi=200)

    # n=115
    # series_df = series_df.tail(n)

    # ax = series_df.plot(label='Historial')
    # prediction_2_df.plot(ax=ax, label='Pronostico')
    # plt.legend()
    # plt.show()

    ###############################################################################

    ######################### Compute std of the forecast error ###################
    # Sample standard deviation of (forecast - actual) over the validation
    # window; safety stock covers ~95% of that error (z = 1.96).

    error = prediction_2a_df["Cantidad-2"] - val_df["Cantidad-2"]
    mean = error.mean()
    diff = error - mean
    diff_squared = diff.apply(lambda x:x**2).sum()
    std = np.sqrt(diff_squared/(len(diff)-1))
    safety_stock = 1.96*std

    ###############################################################################
    ########################## metrics ################################

    mae_1 = mae(val, prediction_2a)
    # mape_1 = mape(val, prediction_2a)
    rmse_1 = rmse(val, prediction_2a)

    ##################### Final forecast ##################################

    promedio = prediction_2_df.mean()

    pronostico_promedio = promedio + safety_stock
    # semana = prediction_2_df.index[0]

    # round the dataframe numbers; everything still works if this is removed
    prediction_2_df = prediction_2_df.round().astype(int)
    safety_stock = safety_stock.round()

    ################## Plot with plotly ###########################################
    # df_borrar = df_sum +1
    # df_borrar = df_borrar.iloc[800:844]

    # History as a blue line+markers, forecast as a red line+markers,
    # all merged into figure_01 below.
    figure_01=px.line(series_df,y="Cantidad-2",)
    figure_01.layout.update(title_text=None,xaxis_rangeslider_visible=True)

    figure_02=px.line(prediction_2_df,y="Cantidad-2")
    figure_02.update_traces(line=dict(color='red'))

    figure_03=px.scatter(series_df,y="Cantidad-2",)
    figure_03.update_traces(marker=dict(color='DodgerBlue'))

    figure_04=px.scatter(prediction_2_df,y="Cantidad-2",)
    figure_04.update_traces(marker=dict(color='red'))

    # st.plotly_chart(figure_01)

    # Add figure_02 data to figure_01
    for trace in figure_02.data:
        figure_01.add_trace(trace)

    # Add figure_03 data to figure_01
    for trace in figure_03.data:
        figure_01.add_trace(trace)

    # Add figure_04 data to figure_01
    for trace in figure_04.data:
        figure_01.add_trace(trace)

    st.subheader(":blue[Historial] y :red[Pronóstico] de Ventas Semanal")

    st.plotly_chart(figure_01)

    ###############################################################################

    # If the model is accurate enough (MAE < 7 units/week), show the week-by-week
    # forecast; otherwise fall back to a flat average-based inventory level.
    if mae_1 < 7:

        st.subheader("Pronóstico de las proximas 15 semanas:")
        prediction_2_total = prediction_2_df + safety_stock
        prediction_2_total.index = pd.to_datetime(prediction_2_total.index).date
        prediction_2_total.rename(columns={'Cantidad-2': 'Nivel de Inventario'}, inplace=True)

        prediction_2_total['Pronostico'] = prediction_2_df['Cantidad-2']
        # Create an array with the safety stock repeated for every forecast week
        stock = np.full((len(prediction_2_total), 1), safety_stock)
        # Convert the array to a DataFrame
        stock_df = pd.DataFrame(stock, columns=['safety']).astype(int)

        prediction_2_total['Stock de Seguridad'] = stock_df['safety'].values
        prediction_2_total = prediction_2_total[['Pronostico', 'Stock de Seguridad', 'Nivel de Inventario']]
        prediction_2_total['Nivel de Inventario'] = prediction_2_total['Nivel de Inventario'].round().astype(int)
        st.table(prediction_2_total)


    else:
        st.subheader("Pronóstico de las proximas 15 semanas:")
        pronostico_promedio = pronostico_promedio.iloc[0]

        # Create an array with the average level repeated for every forecast week
        pronostico_promedio = np.full((len(prediction_2_df), 1), pronostico_promedio)
        # Convert the array to a DataFrame
        pronostico_promedio_df = pd.DataFrame(pronostico_promedio, columns=['Nivel de Inventario'])

        prediction_2_df["Nivel de Inventario"] = pronostico_promedio_df['Nivel de Inventario'].values

        # Create an array with the safety stock repeated for every forecast week
        stock = np.full((len(prediction_2_df), 1), safety_stock)
        # Convert the array to a DataFrame
        stock_df = pd.DataFrame(stock, columns=['safety']).astype(int)
        prediction_2_df['Stock de Seguridad'] = stock_df['safety'].values

        # NOTE: prediction_2_total aliases prediction_2_df here (no copy).
        prediction_2_total = prediction_2_df
        prediction_2_total.index = pd.to_datetime(prediction_2_total.index).date
        prediction_2_total.rename(columns={'Cantidad-2': 'Pronostico'}, inplace=True)
        prediction_2_total = prediction_2_total[['Pronostico', 'Stock de Seguridad', 'Nivel de Inventario']]

        prediction_2_total['Nivel de Inventario'] = prediction_2_total['Nivel de Inventario'].round().astype(int)
        st.table(prediction_2_total)


    #################################################################################

    # Offer the forecast table as a CSV download.
    csv = convert_df(prediction_2_total)
    st.download_button(
        label="Descargar datos ⤵",
        data=csv,
        file_name='pronostico-producto.csv',
        mime='text/csv',
    )


    # Suggested order quantity = first-week forecast - current net inventory,
    # clamped at zero (never suggest a negative order).
    cantidad = round(prediction_2_total.iloc[0,0] - inventario_neto)

    if cantidad<0:
        cantidad =0

    # st.text("Cantidad =" + str(cantidad))
    st.subheader("Cantidad de producto a ordenar (por semana): ")
    # st.text("Cantidad = Pronostico - Inventario Neto")


    st.metric(label=":blue[Cantidad = Nivel de Inventario - Inventario Neto]", value=cantidad)
    # Blank lines push the footer logo toward the bottom of the page.
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')

    image = Image.open("magna.png")
    st.image(image, caption='Inteligencia Artificial para tu negocio')
    st.markdown("Contáctanos: www.magna-machina.com")


else:
    # No file uploaded yet: show a placeholder message and the footer logo.
    st.subheader("No ha seleccionado su producto.")
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    st.write('\n')
    image = Image.open("magna.png")
    st.image(image, caption='Inteligencia Artificial para tu negocio')
    st.markdown("Contáctanos: www.magna-machina.com")
|
435 |
+
|
436 |
+
|
437 |
+
|
438 |
+
|
439 |
+
|
440 |
+
|
magna.png
ADDED
requirements.txt
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
pandas==1.5.2
|
2 |
+
darts==0.24.0
|
3 |
+
matplotlib==3.7.1
|
4 |
+
streamlit==1.26.0
|
5 |
+
numpy==1.24.3
|
6 |
+
plotly==5.14.1
|
7 |
+
openpyxl==3.1.2
|