Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Runtime dependency bootstrap -------------------------------------------
# This script installs its third-party dependencies at startup (a common
# pattern on hosted Spaces without a requirements.txt).
# NOTE(review): prefer declaring torchmetrics / scikit-learn in
# requirements.txt so the platform installs them once at build time instead
# of on every process start.
import subprocess
import sys

# Install the required libraries into the current interpreter's environment.
# check_call raises CalledProcessError if pip fails, aborting the app early
# rather than failing later with an ImportError.
subprocess.check_call(
    [sys.executable, "-m", "pip", "install", "torchmetrics", "scikit-learn"]
)
# Standard library
import urllib.request  # stdlib download of the CSV data files

# Third-party
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn  # building blocks for neural networks
import torch.optim as optim
import torchmetrics
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_absolute_percentage_error, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler  # scales each feature into a given range (default [0, 1])
from torch.autograd import Variable
# --- Data acquisition --------------------------------------------------------
# Download the two CSV files from the Hugging Face repository.
# The original code used IPython's `!wget ...` shell magic, which is a
# SyntaxError in a plain Python script; urllib.request.urlretrieve is the
# stdlib equivalent and needs no external binary.
# NOTE(review): "nombre-del-repositorio" is a placeholder repository id — it
# must be replaced with the real repo name for these downloads to succeed.
import urllib.request

for _csv_name in ("PARCIAL-AGUA-_2_.csv", "PARCIAL-AGUA-_3_.csv"):
    urllib.request.urlretrieve(
        "https://huggingface.co/nombre-del-repositorio/resolve/main/" + _csv_name,
        _csv_name,
    )

# Load the downloaded data sets into DataFrames.
data1 = pd.read_csv('PARCIAL-AGUA-_2_.csv')
data2 = pd.read_csv('PARCIAL-AGUA-_3_.csv')
30 |
+
|
31 |
+
# Convertir la columna 'FECHA' a objetos datetime y filtrar por años
|
32 |
+
data1['FECHA'] = pd.to_datetime(data1['FECHA'])
|
33 |
+
data2['FECHA'] = pd.to_datetime(data2['FECHA'])
|
34 |
+
|
35 |
+
filtered_data1 = data1[data1['FECHA'].dt.year >= 2007]
|
36 |
+
filtered_data2 = data2[data2['FECHA'].dt.year >= 2007]
|
37 |
+
|
38 |
+
# Combinar los valores de ambos conjuntos de datos
|
39 |
+
combined_values = np.concatenate([filtered_data1['VALOR-LS-CF-N'].values, filtered_data2['VALOR-LS-CF-N'].values]).reshape(-1, 1)
|
40 |
+
|
41 |
+
# Seleccionar la variable objetivo y escalar los valores
|
42 |
+
scaler = MinMaxScaler()
|
43 |
+
scaled_values = scaler.fit_transform(combined_values)
|
44 |
+
|
45 |
+
# Dividir los datos escalados en los conjuntos de datos originales
|
46 |
+
scaled_values1 = scaled_values[:len(filtered_data1)]
|
47 |
+
scaled_values2 = scaled_values[len(filtered_data1):]
|