mmmapms committed on
Commit
01f14f6
1 Parent(s): b18a937

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +62 -28
app.py CHANGED
@@ -2,34 +2,68 @@ import streamlit as st
2
  import pandas as pd
3
  import numpy as np
4
  import plotly.graph_objs as go
5
- from io import BytesIO
6
- from datasets import load_dataset
7
-
8
-
9
-
10
- @st.cache_data
11
- def load_data_predictions():
12
- df = pd.read_csv('Predictions.csv')
13
- df = df.rename(columns={
14
- 'Price': 'Real Price',
15
- 'DNN1': 'Neural Network 1',
16
- 'DNN2': 'Neural Network 2',
17
- 'DNN3': 'Neural Network 3',
18
- 'DNN4': 'Neural Network 4',
19
- 'DNN_Ensemble': 'Neural Network Ensemble',
20
- 'LEAR56': 'Regularized Linear Model 1',
21
- 'LEAR84': 'Regularized Linear Model 2',
22
- 'LEAR112': 'Regularized Linear Model 3',
23
- 'LEAR730': 'Regularized Linear Model 4',
24
- 'LEAR_Ensemble': 'Regularized Linear Model Ensemble',
25
- 'Persis': 'Persistence Model',
26
- 'Hybrid_Ensemble': 'Hybrid Ensemble'
27
- })
28
- df['Date'] = pd.to_datetime(df['Date'], dayfirst=True)
29
- df_filtered = df.dropna(subset=['Real Price'])
30
- return df, df_filtered
31
-
32
- df, df_filtered = load_data_predictions()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
  min_date_allowed_pred = df_filtered['Date'].min().date()
35
  max_date_allowed_pred = df_filtered['Date'].max().date()
 
2
  import pandas as pd
3
  import numpy as np
4
  import plotly.graph_objs as go
5
+ import requests
6
+ from io import StringIO
7
+
8
# Function to load predictions from a (possibly private) GitHub repo via the API
@st.cache_data
def load_data_predictions(github_token):
    """Download Predictions.csv from GitHub and return (df, df_filtered).

    Parameters
    ----------
    github_token : str
        Personal access token with read access to the repository.

    Returns
    -------
    (pd.DataFrame, pd.DataFrame)
        The full predictions frame and the subset whose 'Real Price' is
        non-null. Two empty frames are returned when the download fails.
    """
    import base64  # local import: only needed to decode the API payload

    # GitHub API URL for the CSV file
    url = 'https://api.github.com/repos/mmmapms/Forecast_DAM_V2/contents/Predictions.csv'
    headers = {'Authorization': f'token {github_token}'}

    # Make a GET request to the GitHub API
    response = requests.get(url, headers=headers)

    if response.status_code == 200:
        # The contents API returns the file body base64-encoded; it must be
        # decoded before pandas can parse it as CSV (feeding the raw base64
        # string to read_csv yields garbage).
        file_content = response.json()['content']
        csv_content = StringIO(base64.b64decode(file_content).decode('utf-8'))

        df = pd.read_csv(csv_content, encoding='utf-8')
        # Restore the human-readable column names used throughout the app
        # (mapping recovered from the previous revision of this function;
        # the `{...}` placeholder here was a literal Ellipsis and did nothing).
        df = df.rename(columns={
            'Price': 'Real Price',
            'DNN1': 'Neural Network 1',
            'DNN2': 'Neural Network 2',
            'DNN3': 'Neural Network 3',
            'DNN4': 'Neural Network 4',
            'DNN_Ensemble': 'Neural Network Ensemble',
            'LEAR56': 'Regularized Linear Model 1',
            'LEAR84': 'Regularized Linear Model 2',
            'LEAR112': 'Regularized Linear Model 3',
            'LEAR730': 'Regularized Linear Model 4',
            'LEAR_Ensemble': 'Regularized Linear Model Ensemble',
            'Persis': 'Persistence Model',
            'Hybrid_Ensemble': 'Hybrid Ensemble'
        })
        df['Date'] = pd.to_datetime(df['Date'], dayfirst=True)
        df_filtered = df.dropna(subset=['Real Price'])
        return df, df_filtered
    else:
        st.error("Failed to download data. Please check your GitHub token and repository details.")
        return pd.DataFrame(), pd.DataFrame()
32
+
33
# Streamlit app UI — page title and data bootstrap
st.title("Belgium: Electricity Price Forecasting")
# Use .get so a missing secret yields an empty string instead of raising
# KeyError; with plain indexing the warning branch below was unreachable.
github_token = st.secrets.get("GitHub_Token_Margarida", "")

if github_token:
    df, df_filtered = load_data_predictions(github_token)
    # Your existing logic to use df and df_filtered
else:
    # The token comes from st.secrets, not user input, so point the operator
    # at the missing configuration rather than asking them to "enter" one.
    st.warning("GitHub token secret 'GitHub_Token_Margarida' is not configured; cannot load predictions.")
42
+
43
+
44
+ #@st.cache_data
45
+ #def load_data_predictions():
46
+ # df = pd.read_csv('Predictions.csv')
47
+ # df = df.rename(columns={
48
+ # 'Price': 'Real Price',
49
+ # 'DNN1': 'Neural Network 1',
50
+ # 'DNN2': 'Neural Network 2',
51
+ # 'DNN3': 'Neural Network 3',
52
+ # 'DNN4': 'Neural Network 4',
53
+ # 'DNN_Ensemble': 'Neural Network Ensemble',
54
+ # 'LEAR56': 'Regularized Linear Model 1',
55
+ # 'LEAR84': 'Regularized Linear Model 2',
56
+ # 'LEAR112': 'Regularized Linear Model 3',
57
+ # 'LEAR730': 'Regularized Linear Model 4',
58
+ # 'LEAR_Ensemble': 'Regularized Linear Model Ensemble',
59
+ # 'Persis': 'Persistence Model',
60
+ # 'Hybrid_Ensemble': 'Hybrid Ensemble'
61
+ #})
62
+ # df['Date'] = pd.to_datetime(df['Date'], dayfirst=True)
63
+ # df_filtered = df.dropna(subset=['Real Price'])
64
+ # return df, df_filtered
65
+
66
+ #df, df_filtered = load_data_predictions()
67
 
68
  min_date_allowed_pred = df_filtered['Date'].min().date()
69
  max_date_allowed_pred = df_filtered['Date'].max().date()