ec98 committed on
Commit
a23b6ea
1 Parent(s): 807df67

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +131 -110
app.py CHANGED
@@ -1,110 +1,131 @@
1
- import streamlit as st
2
- import pandas as pd
3
- import numpy as np
4
- import torch
5
- import torch.nn as nn
6
- import matplotlib.pyplot as plt
7
- from sklearn.preprocessing import MinMaxScaler
8
-
9
- # Cargar los datos de los dos CSV
10
- file1 = 'PARCIAL-AGUA-_2_.csv'
11
- file2 = 'PARCIAL-AGUA-_3_.csv'
12
-
13
- data1 = pd.read_csv(file1)
14
- data2 = pd.read_csv(file2)
15
-
16
- # Convertir la columna 'FECHA' a objetos datetime y filtrar por años
17
- data1['FECHA'] = pd.to_datetime(data1['FECHA'])
18
- data2['FECHA'] = pd.to_datetime(data2['FECHA'])
19
-
20
- filtered_data1 = data1[data1['FECHA'].dt.year >= 2007]
21
- filtered_data2 = data2[data2['FECHA'].dt.year >= 2007]
22
-
23
- combined_values = np.concatenate([filtered_data1['VALOR-LS-CF-N'].values, filtered_data2['VALOR-LS-CF-N'].values]).reshape(-1, 1)
24
-
25
- scaler = MinMaxScaler()
26
- scaled_values = scaler.fit_transform(combined_values)
27
-
28
- scaled_values1 = scaled_values[:len(filtered_data1)]
29
- scaled_values2 = scaled_values[len(filtered_data1):]
30
-
31
- def sliding_windows(data, seq_length):
32
- x, y = [], []
33
- for i in range(len(data) - seq_length):
34
- x.append(data[i:i + seq_length])
35
- y.append(data[i + seq_length])
36
- return np.array(x), np.array(y)
37
-
38
- seq_length = 4
39
- x_train, y_train = sliding_windows(scaled_values1, seq_length)
40
- x_test, y_test = sliding_windows(scaled_values2, seq_length)
41
-
42
- trainX = torch.Tensor(x_train)
43
- trainY = torch.Tensor(y_train)
44
- testX = torch.Tensor(x_test)
45
- testY = torch.Tensor(y_test)
46
-
47
- class LSTM(nn.Module):
48
- def __init__(self, input_size, hidden_size, num_layers, output_size):
49
- super(LSTM, self).__init__()
50
- self.hidden_size = hidden_size
51
- self.num_layers = num_layers
52
- self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
53
- self.fc = nn.Linear(hidden_size, output_size)
54
-
55
- def forward(self, x):
56
- h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
57
- c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
58
- out, _ = self.lstm(x, (h0, c0))
59
- out = self.fc(out[:, -1, :])
60
- return out
61
-
62
- st.title('Predicción de Series de Tiempo')
63
- st.sidebar.title('Parámetros del Modelo')
64
-
65
- model_type = st.sidebar.selectbox('Selecciona el modelo', ('LSTM', 'Otro Modelo'))
66
- num_epochs = st.sidebar.slider('Número de épocas', 100, 500, 200)
67
- learning_rate = st.sidebar.number_input('Tasa de aprendizaje', 0.001, 0.1, 0.01, 0.001)
68
-
69
- if model_type == 'LSTM':
70
- input_size = 1
71
- hidden_size = 50
72
- num_layers = 2
73
- output_size = 1
74
-
75
- model = LSTM(input_size, hidden_size, num_layers, output_size)
76
-
77
- criterion = nn.MSELoss()
78
- optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
79
-
80
- if st.sidebar.button('Entrenar y Predecir'):
81
- for epoch in range(num_epochs):
82
- model.train()
83
- outputs = model(trainX)
84
- optimizer.zero_grad()
85
- loss = criterion(outputs, trainY)
86
- loss.backward()
87
- optimizer.step()
88
- if (epoch+1) % 100 == 0:
89
- st.write(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
90
-
91
- model.eval()
92
- train_predict = model(trainX)
93
- test_predict = model(testX)
94
-
95
- train_predict = scaler.inverse_transform(train_predict.detach().numpy().reshape(-1, 1))
96
- trainY_plot = scaler.inverse_transform(trainY.numpy().reshape(-1, 1))
97
- test_predict = scaler.inverse_transform(test_predict.detach().numpy().reshape(-1, 1))
98
- testY_plot = scaler.inverse_transform(testY.numpy().reshape(-1, 1))
99
-
100
- fig, ax = plt.subplots(figsize=(12, 6))
101
- ax.plot(filtered_data1['FECHA'].values[seq_length:seq_length+len(trainY)], trainY_plot, label='Datos de entrenamiento')
102
- ax.plot(filtered_data1['FECHA'].values[seq_length:seq_length+len(trainY)], train_predict, label='Predicciones de entrenamiento')
103
- ax.plot(filtered_data2['FECHA'].values[seq_length:seq_length+len(testY)], testY_plot, label='Datos de prueba')
104
- ax.plot(filtered_data2['FECHA'].values[seq_length:seq_length+len(testY)], test_predict, label='Predicciones de prueba')
105
- ax.set_xlabel('Fecha')
106
- ax.set_ylabel('VALOR-LS-CF-N')
107
- ax.set_title('Predicciones con LSTM')
108
- ax.legend()
109
- ax.grid(True)
110
- st.pyplot(fig)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import pandas as pd
3
+ import numpy as np
4
+ import torch
5
+ import torch.nn as nn
6
+ import matplotlib.pyplot as plt
7
+ from sklearn.preprocessing import MinMaxScaler
8
+
9
# Load the two CSV data files
file1 = 'PARCIAL-AGUA-_2_.csv'
file2 = 'PARCIAL-AGUA-_3_.csv'

data1 = pd.read_csv(file1)
data2 = pd.read_csv(file2)

# Parse the 'FECHA' column as datetimes and keep rows from 2007 onwards
data1['FECHA'] = pd.to_datetime(data1['FECHA'])
data2['FECHA'] = pd.to_datetime(data2['FECHA'])

filtered_data1 = data1[data1['FECHA'].dt.year >= 2007]
filtered_data2 = data2[data2['FECHA'].dt.year >= 2007]

# Fit the scaler on the training series only and then apply the same
# transform to the test series. (The original fit on train+test combined,
# leaking test-set statistics into the training normalization.)
scaler = MinMaxScaler()
scaled_values1 = scaler.fit_transform(
    filtered_data1['VALOR-LS-CF-N'].values.reshape(-1, 1))
scaled_values2 = scaler.transform(
    filtered_data2['VALOR-LS-CF-N'].values.reshape(-1, 1))
30
+
31
def sliding_windows(data, seq_length):
    """Split *data* into overlapping (window, next-value) supervised pairs.

    Returns two numpy arrays: all contiguous windows of length
    ``seq_length`` and, aligned with them, the value that immediately
    follows each window.
    """
    n_windows = len(data) - seq_length
    windows = np.array([data[start:start + seq_length] for start in range(n_windows)])
    targets = np.array([data[start + seq_length] for start in range(n_windows)])
    return windows, targets
37
+
38
# Window length: each training sample looks back 4 observations.
seq_length = 4

x_train, y_train = sliding_windows(scaled_values1, seq_length)
x_test, y_test = sliding_windows(scaled_values2, seq_length)

# Wrap the numpy windows as float32 tensors for the torch model.
trainX, trainY, testX, testY = (
    torch.Tensor(arr) for arr in (x_train, y_train, x_test, y_test)
)
46
+
47
class LSTM(nn.Module):
    """Many-to-one LSTM regressor.

    Consumes a ``(batch, seq, input_size)`` sequence (``batch_first=True``)
    and predicts a single ``output_size``-dimensional value from the hidden
    state of the final time step.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Zero initial hidden/cell states, allocated on the same device and
        # dtype as the input so the model also works with GPU or non-default
        # dtype inputs (the original allocated them on the default device,
        # which crashes for CUDA inputs).
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size,
                         device=x.device, dtype=x.dtype)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size,
                         device=x.device, dtype=x.dtype)
        out, _ = self.lstm(x, (h0, c0))
        # Regress only on the last time step's hidden state.
        return self.fc(out[:, -1, :])
61
+
62
# Page heading and sidebar controls.
st.title('Predicción de Series de Tiempo')

sidebar = st.sidebar
sidebar.title('Parámetros del Modelo')

# User-selected model and training hyper-parameters.
model_type = sidebar.selectbox('Selecciona el modelo', ('LSTM', 'Otro Modelo'))
num_epochs = sidebar.slider('Número de épocas', 100, 500, 200)
learning_rate = sidebar.number_input('Tasa de aprendizaje', 0.001, 0.1, 0.01, 0.001)
68
+
69
if model_type == 'LSTM':
    # Fixed architecture hyper-parameters for the univariate series.
    input_size = 1
    hidden_size = 50
    num_layers = 2
    output_size = 1

    model = LSTM(input_size, hidden_size, num_layers, output_size)

    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    if st.sidebar.button('Entrenar y Predecir'):
        # Full-batch training loop; report the loss every 100 epochs.
        for epoch in range(num_epochs):
            model.train()
            optimizer.zero_grad()
            outputs = model(trainX)
            loss = criterion(outputs, trainY)
            loss.backward()
            optimizer.step()
            if (epoch + 1) % 100 == 0:
                st.write(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

        # Inference without gradient tracking.
        model.eval()
        with torch.no_grad():
            train_predict = model(trainX)
            test_predict = model(testX)

        # Undo the Min-Max scaling so the chart shows values in real units.
        train_predict = scaler.inverse_transform(train_predict.detach().numpy().reshape(-1, 1))
        trainY_plot = scaler.inverse_transform(trainY.numpy().reshape(-1, 1))
        test_predict = scaler.inverse_transform(test_predict.detach().numpy().reshape(-1, 1))
        testY_plot = scaler.inverse_transform(testY.numpy().reshape(-1, 1))

        # DataFrame columns must be 1-D; inverse_transform returns (n, 1)
        # arrays, so ravel them. (The original passed the 2-D arrays
        # directly, which raises "Per-column arrays must each be
        # 1-dimensional".)
        train_data = pd.DataFrame({
            'Fecha': filtered_data1['FECHA'].values[seq_length:seq_length + len(trainY)],
            'Datos de entrenamiento': trainY_plot.ravel(),
            'Predicciones de entrenamiento': train_predict.ravel()
        })

        test_data = pd.DataFrame({
            'Fecha': filtered_data2['FECHA'].values[seq_length:seq_length + len(testY)],
            'Datos de prueba': testY_plot.ravel(),
            'Predicciones de prueba': test_predict.ravel()
        })

        # Stack the train and test rows into one date-indexed table so
        # Streamlit charts all four series together (columns missing from
        # one half are simply NaN there).
        combined_data = pd.concat([train_data, test_data])
        combined_data.set_index('Fecha', inplace=True)

        # Show the chart in Streamlit.
        st.line_chart(combined_data)