# %%
# -*- coding: utf-8 -*-
"""
Streamlit app for a simple electricity market dispatch and investment model,
formulated with linopy and solved with HiGHS.
"""
from numpy import arange
import xarray as xr
import highspy
from linopy import Model, EQUAL
import pandas as pd
import plotly.express as px
import streamlit as st
import sourced as src
import numpy as np
import tempfile
## Settings
write_pickle_from_standard_excel = True
st.set_page_config(layout="wide")
# Columns help manage the flow of the page:
# this command makes four columns of equal width
col1, col2, col3, col4 = st.columns(4)
col1.header("Data Input")
col4.header("Download Results")
# Color dictionary for figures
color_dict = {'Biomasse': 'lightgreen',
              'Braunkohle': 'red',
              'Erdgas': 'orange',
              'Steinkohle': 'darkgrey',
              'Erdöl': 'brown',
              'Laufwasser': 'aquamarine',
              'Kernenergie': 'cyan',
              'PV': 'yellow',
              'WindOff': 'darkblue',
              'WindOn': 'blue',
              'Batteriespeicher': 'purple'}
# %%
with col1:
    with open('Input_Jahr_2021.xlsx', 'rb') as f:
        st.download_button('Download Excel Vorlage', f, file_name='Input_Jahr_2021.xlsx')  # Defaults to 'application/octet-stream'
    # url_excel = r'Input_Jahr_2021.xlsx'
    url_excel = st.file_uploader(label='Excel Datei hochladen')
    if url_excel is None:
        if write_pickle_from_standard_excel:
            url_excel = r'Input_Jahr_2021.xlsx'
            sets_dict, params_dict = src.load_data_from_excel(url_excel, write_to_pickle_flag=True)
        sets_dict, params_dict = src.load_from_pickle()
        with col4:
            st.write('Lauf mit Standarddaten')
    else:
        # sets_dict, params_dict = src.load_data_from_excel(url_excel, load_from_pickle_flag=False)
        sets_dict, params_dict = src.load_data_from_excel(url_excel, write_to_pickle_flag=True)
        with col4:
            st.write('Lauf mit Nutzerdaten')
    # Debugging output to verify that sets_dict is defined
    # st.write(f"sets_dict: {sets_dict}")
    # st.write(f"params_dict: {params_dict}")
# %%
def timestep_aggregate(time_steps_aggregate, da):
    # Rolling mean over the aggregation window, then keep every n-th step
    # (uses the global time index `t` for the selection).
    return da.rolling(t=time_steps_aggregate).mean().sel(t=t[0::time_steps_aggregate])
# s_t_r_iRes = timestep_aggregate(6, s_t_r_iRes)
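# Aggregating to a resolution of dt hours reduces an 8760-hour year to 8760/dt representative
# time steps; each aggregated step is the mean of the dt hours it replaces, so energy totals
# are recovered later by multiplying with dt.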
# %%
# sets_dict, params_dict = src.load_data_from_excel(url_excel, write_to_pickle_flag=True)
# %%
# sets_dict, params_dict = load_data_from_excel(url_excel, load_from_pickle_flag=False)
# Unpack sets_dict into the workspace
t = sets_dict['t']
t_original = sets_dict['t']
i = sets_dict['i']
iSto = sets_dict['iSto']
iConv = sets_dict['iConv']
# iPtG = sets_dict['iPtG']
iRes = sets_dict['iRes']
# iHyRes = sets_dict['iHyRes']
# Unpack params_dict into the workspace
l_co2 = params_dict['l_co2']
p_co2 = params_dict['p_co2']
eff_i = params_dict['eff_i']
life_i = params_dict['life_i']
c_fuel_i = params_dict['c_fuel_i']
c_other_i = params_dict['c_other_i']
c_inv_i = params_dict['c_inv_i']
co2_factor_i = params_dict['co2_factor_i']
c_var_i = params_dict['c_var_i']
K_0_i = params_dict['K_0_i']
e2p_iSto = params_dict['e2p_iSto']
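# Naming as used below: t = time steps, i = technologies, iSto/iConv/iRes = storage,
# conventional and renewable subsets; K_0_i = existing capacity [MW], c_inv_i = specific
# investment cost, c_fuel_i = fuel cost [€/MWh], eff_i = conversion efficiency,
# co2_factor_i = CO2 emission factor, e2p_iSto = energy-to-power ratio of storages [h].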
# Sliders and input boxes for parameters
with col2:
    # Slider for the CO2 limit [Mio. t]
    l_co2 = st.slider(value=int(params_dict['l_co2']), min_value=0, max_value=750, label="CO2 Limit [Mio. t]", step=10)
    # # Slider for the H2 price / use value [€/MWh_th]
    # price_h2 = st.slider(value=100, min_value=0, max_value=300, label="Wasserstoffpreis [€/MWh]", step=10)
    for i_idx in c_fuel_i.get_index('i'):
        if i_idx in ['Braunkohle']:
            c_fuel_i.loc[i_idx] = st.slider(value=int(c_fuel_i.loc[i_idx]), min_value=0, max_value=300, label=i_idx + ' Preis [€/MWh]', step=10)
    dt = st.number_input(label="Zeitliche Auflösung [h]", min_value=1, max_value=len(t), value=6, help="Geben Sie nur ganze Zahlen zwischen 1 und 8760 (oder 8784 für Schaltjahre) ein.")
with col3:
    # Sliders for fuel prices [€/MWh]
    for i_idx in c_fuel_i.get_index('i'):
        if i_idx in ['Steinkohle', 'Erdöl', 'Erdgas']:
            c_fuel_i.loc[i_idx] = st.slider(value=int(c_fuel_i.loc[i_idx]), min_value=0, max_value=300, label=i_idx + ' Preis [€/MWh]', step=10)
    # technologies_invest = st.multiselect(label='Technologien für Investitionen', options=i, default=['Biomasse','Laufwasser','Kernenergie','Braunkohle','Steinkohle','Erdöl','Erdgas','WindOff','WindOn','PV','Batteriespeicher'])
    technologies_invest = st.multiselect(label='Technologien für Investitionen', options=i, default=['Kernenergie', 'Braunkohle', 'Steinkohle', 'Erdgas', 'Erdöl', 'Biomasse', 'Laufwasser', 'WindOn', 'WindOff', 'PV', 'Batteriespeicher'])
    technologies_no_invest = [x for x in i if x not in technologies_invest]
# Aggregate time series to the chosen resolution dt
D_t = timestep_aggregate(dt, params_dict['D_t'])
s_t_r_iRes = timestep_aggregate(dt, params_dict['s_t_r_iRes'])
# h_t = timestep_aggregate(dt, params_dict['h_t'])
t = D_t.get_index('t')
partial_year_factor = (8760 / len(t)) / dt
# time_steps_aggregate = 6
# = xr_profiles.rolling(time_step=time_steps_aggregate).mean().sel(time_step=time[0::time_steps_aggregate])
price_co2 = 0
# technologies_no_invest = st.multiselect(label='Technology invest', options=i)
# technologies_no_invest = ['Electrolyzer', 'Biomass', 'RoR', 'Hydro Water Reservoir', 'Nuclear']
# %%
### Variables
m = Model()
C_tot = m.add_variables(name='C_tot')  # Total costs
C_op = m.add_variables(name='C_op', lower=0)  # Operational costs
C_inv = m.add_variables(name='C_inv', lower=0)  # Investment costs
K = m.add_variables(coords=[i], name='K', lower=0)  # Endogenous (new) capacity
y = m.add_variables(coords=[t, i], name='y', lower=0)  # Electricity production --> to be excluded for electrolysers
y_ch = m.add_variables(coords=[t, i], name='y_ch', lower=0)  # Electricity consumption --> to be excluded for everything except electrolysers and storage
l = m.add_variables(coords=[t, i], name='l', lower=0)  # Storage filling level
w = m.add_variables(coords=[t], name='w', lower=0)
y_curt = m.add_variables(coords=[t, i], name='y_curt', lower=0)  # RES curtailment
# y_h2 = m.add_variables(coords=[t, i], name='y_h2', lower=0)
## Objective function
C_tot = C_op + C_inv
m.add_objective(C_tot)
## Cost terms for the objective function
# Operational costs (minus revenue for produced hydrogen)
# C_op_sum = m.add_constraints((y * c_fuel_i / eff_i).sum() * dt - (y_h2.sel(i=iPtG) * price_h2).sum() * dt == C_op, name='C_op_sum')
C_op_sum = m.add_constraints((y * c_fuel_i / eff_i).sum() * dt == C_op, name='C_op_sum')
# Investment costs
C_inv_sum = m.add_constraints((K * c_inv_i).sum() == C_inv, name='C_inv_sum')
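# In formula terms the model minimises
#   C_tot = C_op + C_inv
#   C_op  = sum_{t,i} y[t,i] * c_fuel_i / eff_i * dt   (fuel costs of all production)
#   C_inv = sum_{i}   K[i]   * c_inv_i                 (investment costs of new capacity)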
## Load serving
loadserve_t = m.add_constraints(((y.sum(dims='i') - y_ch.sum(dims='i')) * dt == D_t.sel(t=t) * dt), name='load')
# loadserve_t = m.add_constraints((y.sum(dims='i') * dt == D_t.sel(t=t) * dt), name='load')
## Maximum capacity limit
maxcap_i_t = m.add_constraints((y - K <= K_0_i), name='max_cap')
## Restrict investment to the technologies selected in the multiselect
maxcap_invest_i = m.add_constraints((K.sel(i=technologies_no_invest) <= 0), name='max_cap_invest')
## Prevent power production by PtG
# no_power_prod_iPtG_t = m.add_constraints((y.sel(i=iPtG) <= 0), name='prevent_ptg_prod')
## Maximum storage charging and discharging
maxcha_iSto_t = m.add_constraints((y.sel(i=iSto) - y_ch.sel(i=iSto) - K.sel(i=iSto) <= K_0_i.sel(i=iSto)), name='max_cha')
## Maximum electrolyzer capacity
# ptg_prod_iPtG_t = m.add_constraints((y_ch.sel(i=iPtG) - K.sel(i=iPtG) <= K_0_i.sel(i=iPtG)), name='max_cha_ptg')
## PtG H2 production
# h2_prod_iPtG_t = m.add_constraints(y_ch.sel(i=iPtG) * eff_i.sel(i=iPtG) == y_h2.sel(i=iPtG), name='ptg_h2_prod')
## Infeed of renewables
infeed_iRes_t = m.add_constraints((y.sel(i=iRes) - s_t_r_iRes.sel(i=iRes).sel(t=t) * K.sel(i=iRes) + y_curt.sel(i=iRes) == s_t_r_iRes.sel(i=iRes).sel(t=t) * K_0_i.sel(i=iRes)), name='infeed')
## Maximum filling level restriction storage power plant
maxcapsto_iSto_t = m.add_constraints((l.sel(i=iSto) - K.sel(i=iSto) * e2p_iSto.sel(i=iSto) <= K_0_i.sel(i=iSto) * e2p_iSto.sel(i=iSto)), name='max_sto_filling')
## Filling level restriction hydro reservoir
# filling_iHydro_t = m.add_constraints(l.sel(i=iHyRes) - l.sel(i=iHyRes).roll(t=-1) + y.sel(i=iHyRes) * dt == h_t.sel(t=t) * dt, name='filling_level_hydro')
## Filling level restriction other storages
filling_iSto_t = m.add_constraints(l.sel(i=iSto) - (l.sel(i=iSto).roll(t=-1) + (y.sel(i=iSto) / eff_i.sel(i=iSto)) * dt - y_ch.sel(i=iSto) * eff_i.sel(i=iSto) * dt) == 0, name='filling_level')
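# Storage balance: roll(t=-1) makes l.roll(t=-1) at step t equal to l[t+1], so the constraint
# reads l[t+1] = l[t] - (y[t] / eff_i) * dt + y_ch[t] * eff_i * dt: discharging (including
# conversion losses) lowers the filling level, charging raises it, and the roll wraps the last
# time step back to the first, which closes the storage cycle over the year.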
## CO2 limit (l_co2 is given in Mio. t, hence the factor 1,000,000)
# l_co2 = 50
CO2_limit = m.add_constraints(((y / eff_i) * co2_factor_i * dt).sum() <= l_co2 * 1_000_000, name='CO2_limit')
## Exogenous capacity limits: run-of-river 5 GW, biomass 9 GW
RoR_cap = m.add_constraints(K.sel(i='Laufwasser') <= 5000, name='RoR_cap')
Biomass_cap = m.add_constraints(K.sel(i='Biomasse') <= 9000, name='Biomass_cap')
# Nuclear_cap = m.add_constraints(K.sel(i='Kernenergie') <= 3000, name='Kernenergie_cap')
# nuclear_production_constraint = m.add_constraints(y.sel(i='Kernenergie') == K.sel(i='Kernenergie'), name='Nuclear_Production_Capacity')
# %%
m.solve(solver_name='highs')
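# Optional sanity check (sketch, not part of the original script): newer linopy versions expose
# a solver status on the model after solving; uncomment and adapt to your linopy version.
# if getattr(m, 'status', 'ok') != 'ok':
#     st.error('Optimierung nicht erfolgreich - bitte Eingabedaten prüfen.')
#     st.stop()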
st.markdown("---")
colb1, colb2 = st.columns(2)
# %%
# c_var_i.to_dataframe(name='VarCosts')
# %%
# Total installed capacity (existing + newly built)
fig = px.bar((m.solution['K'] + K_0_i).to_dataframe(name='K').reset_index(),
             y='i', x='K', orientation='h', title='Installierte Kapazitäten insgesamt [MW]', color='i')
# fig
# %%
total_costs = float(m.solution['C_inv'].values) + float(m.solution['C_op'].values)
total_costs_rounded = round(total_costs / 1e9, 2)
df_total_costs = pd.DataFrame({'Total costs': [total_costs]})
with colb1:
    st.write('Gesamtkosten: ' + str(total_costs_rounded) + ' Mrd. €')
# %%
# df_Co2_price = pd.DataFrame({'CO2_Price: ': [float(m.constraints['CO2_limit'].dual.values) * (-1)]})
CO2_price = float(m.constraints['CO2_limit'].dual.values) * (-1)
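# The dual value (shadow price) of the CO2 cap is reported as the CO2 price in €/t;
# the sign is flipped because the constraint is formulated as an upper limit.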
CO2_price_rounded = round(CO2_price, 2)
df_CO2_price = pd.DataFrame({'CO2 price': [CO2_price]})
with colb2:
    # st.write(str(df_Co2_price))
    st.write('CO2 Preis: ' + str(CO2_price_rounded) + ' €/t')
# %%
df_new_capacities = m.solution['K'].to_dataframe().reset_index()
fig = px.bar(df_new_capacities, y='i', x='K', orientation='h', title='Neu installierte Kapazitäten [MW]', color='i', color_discrete_map=color_dict)
with colb1:
    fig
# %%
D_t_sorted = D_t.sortby(D_t, ascending=False).to_dataframe().reset_index()
# NaN entries go to the end
D_t_sorted = D_t_sorted.sort_values(by='Nachfrage', ascending=False).reset_index(drop=True)
# expand the sorted demand back to hourly resolution (each aggregated step represents dt hours)
D_t_sorted = D_t_sorted.loc[D_t_sorted.index.repeat(dt)].reset_index(drop=True)
x_loadcurve = np.arange(1, D_t_sorted['Nachfrage'].size + 1)
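# Load duration curve: demand values are sorted in descending order and plotted against the
# number of hours; repeating each aggregated value dt times restores the full hourly axis.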
# residual load curve
df_production_res = m.solution['y'].sel(i=iRes).to_dataframe().reset_index()
# sum renewable production over all technologies per time step
df_production_res_sum = df_production_res.groupby('t')['y'].sum().reset_index()
# D_t into a dataframe
D_t_df = D_t.to_dataframe().reset_index()
df_residual = D_t_df['Nachfrage'] - df_production_res_sum['y']
# sort descending
df_residual = df_residual.sort_values(ascending=False).reset_index(drop=True)
df_residual = df_residual.loc[df_residual.index.repeat(dt)].reset_index(drop=True)
df_combined = pd.DataFrame({
    'x': np.concatenate([x_loadcurve, x_loadcurve]),
    'y': np.concatenate([D_t_sorted['Nachfrage'], df_residual]),
    'label': ['Nachfrage'] * len(x_loadcurve) + ['Residual Load'] * len(x_loadcurve)
})
# Create the integrated plot using Plotly Express
fig = px.line(df_combined, x='x', y='y', color='label', title='Lastdauerlinie [MW]',
              labels={"x": "Stunden im Jahr", "y": "Leistung [MW]"})
# Specific updates for each trace
fig.for_each_trace(
    lambda trace: trace.update(line=dict(color='blue')) if trace.name == 'Nachfrage' else trace.update(line=dict(color='red', dash='dash'))
)
with colb2:
    fig.update_layout(
        legend_title='Legende'
    )
    fig
# fig.show()
# %%
# calculate full load hours
i_with_capacity = m.solution['K'].where(m.solution['K'] > 0).dropna(dim='i').get_index('i')
df_production = m.solution['y'].sel(i=i_with_capacity).to_dataframe().reset_index()
df_capacity = m.solution['K'].sel(i=i_with_capacity).to_dataframe().reset_index()
df_production_sum = (df_production.groupby('i')['y'].sum() * dt).round(0).reset_index()
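# Full-load hours = annual energy production [MWh] (production per step times dt) divided by
# the installed capacity from the solution [MW].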
# reorder rows according to i_with_capacity
df_production_sum = df_production_sum.set_index('i').loc[i_with_capacity].reset_index()
# df_production_sum['i'] = pd.Categorical(df_production_sum['i'], categories=desired_order, ordered=True)
df_fullload = df_production_sum['y'] / df_capacity['K']
# to dataframe
df_fullload = df_fullload.to_frame()
# rename column
df_fullload.columns = ['fullload']
df_fullload['i'] = df_production_sum['i']
# change order of columns
df_fullload = df_fullload[['i', 'fullload']]
fig = px.bar(df_fullload, y='i', x='fullload', orientation='h', title='Volllaststunden [h]', color='i', color_discrete_map=color_dict)
with colb1:
    fig
# fig.show()
# %%
fig = px.area(m.solution['y'].sel(i=i_with_capacity).to_dataframe().reset_index(), y='y', x='t', title='Stromproduktion Lastgang [MW]', color='i', color_discrete_map=color_dict)
fig.update_traces(line=dict(width=0))
fig.for_each_trace(lambda trace: trace.update(fillcolor=trace.line.color))
with colb1:
    fig
# fig.show()
# %%
df_price = m.constraints['load'].dual.to_dataframe().reset_index()
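# The dual of the load-balance constraint 'load' is the marginal cost of serving demand and is
# read here as the electricity price [€/MWh] for each time step.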
# expand df_price back to the original number of hours
df_price = df_price.loc[df_price.index.repeat(dt)].reset_index(drop=True)
# sort prices in descending order
df_sorted_price = df_price["dual"].sort_values(ascending=False).reset_index(drop=True)
# generate x-axis for the price duration curve
x_price = np.arange(1, df_sorted_price.size + 1)
fig = px.line(y=df_sorted_price, x=x_price, title='Preisdauerlinie [€/MWh]', labels={"x": "Stunden im Jahr"}, range_y=[0, 350])
with colb2:
    fig
# %%
fig = px.line(df_price, y='dual', x='t', title='Strompreis [€/MWh]', range_y=[0, 350])
with colb2:
    fig
# %%
# curtailment
df_curtailment = m.solution['y_curt'].sel(i=iRes).to_dataframe().reset_index()
fig = px.area(df_curtailment, y='y_curt', x='t', title='Abregelung [MWh]', color='i', color_discrete_map=color_dict)
fig.update_traces(line=dict(width=0))
fig.for_each_trace(lambda trace: trace.update(fillcolor=trace.line.color))
with colb1:
    fig
# %%
df_charging = m.solution['y_ch'].sel(i=iSto).to_dataframe().reset_index()
fig = px.area(df_charging, y='y_ch', x='t', title='Speicherbeladung [MWh]', color='i', color_discrete_map=color_dict)
fig.update_traces(line=dict(width=0))
fig.for_each_trace(lambda trace: trace.update(fillcolor=trace.line.color))
with colb2:
    fig
# %%
# df_contr_marg = m.constraints['max_cap'].dual.to_dataframe().reset_index()
# df_contr_marg['dual'] = df_contr_marg['dual'] / dt * (-1)
# fig = px.line(df_contr_marg, y='dual', x='t', title='Deckungsbeitrag [€]', color='i', range_y=[0, 350], color_discrete_map=color_dict)
# with colb2:
#     fig
# %%
# x-axis with step length 1, same size as t
x = np.arange(1, t.size + 1)
df_production_pivot = df_production.pivot(index='t', columns='i', values='y')
# sort columns according to i_with_capacity
df_production_pivot = df_production_pivot[i_with_capacity]
df_efficiency = eff_i.sel(i=i_with_capacity)
co2_factor_i_with_capacity = co2_factor_i.sel(i=i_with_capacity)
color_dict_with_capacity = {tech: color_dict[tech] for tech in i_with_capacity}
desired_order = i_with_capacity.tolist()
# emissions per technology and time step: fuel use (production / efficiency) times CO2 factor times dt
df_production_emissions = df_production_pivot / df_efficiency * co2_factor_i_with_capacity * dt
# unpivot df_production_emissions and sort by time
df_production_emissions_unpivot = df_production_emissions.reset_index().melt(id_vars='t', var_name='i', value_name='y')
df_production_emissions_unpivot['i'] = pd.Categorical(df_production_emissions_unpivot['i'], categories=desired_order, ordered=True)
df_production_emissions_unpivot = df_production_emissions_unpivot.sort_values(by=['t', 'i'])
# area plot of df_production_emissions_unpivot over t
fig = px.area(df_production_emissions_unpivot, y='y', x='t', title='CO2-Emissionen [t]', color='i', color_discrete_map=color_dict_with_capacity)
fig.update_traces(line=dict(width=0))
fig.for_each_trace(lambda trace: trace.update(fillcolor=trace.line.color))
with colb1:
    fig
# %%
# total emissions per time step (sum across technologies)
df_production_emissions_sum = df_production_emissions.copy()
df_production_emissions_sum['total'] = df_production_emissions_sum.sum(axis=1)
# sort by total emissions
df_production_emissions_sum = df_production_emissions_sum.sort_values(by='total', ascending=True)
# cumulate every column over the sorted time steps
df_production_emissions_sum_cumsum = df_production_emissions_sum.cumsum(axis=0)
# remove columns which are completely zero
df_production_emissions_sum_cumsum = df_production_emissions_sum_cumsum.loc[:, (df_production_emissions_sum_cumsum != 0).any(axis=0)]
# unpivot df_production_emissions_sum_cumsum
df_production_emissions_sum_unpivot = df_production_emissions_sum_cumsum.reset_index().melt(id_vars='t', var_name='i', value_name='y')
# keep only i in i_with_capacity
df_production_emissions_sum_unpivot = df_production_emissions_sum_unpivot[df_production_emissions_sum_unpivot['i'].isin(i_with_capacity)].reset_index(drop=True)
# set zero values to NaN
df_production_emissions_sum_unpivot['y'] = df_production_emissions_sum_unpivot['y'].replace(0, np.nan)
# layered area plot of the cumulated emissions over t
fig = px.area(df_production_emissions_sum_unpivot, y='y', x='t', title='Kumulierte CO2-Emissionen [t]', color='i', color_discrete_map=color_dict_with_capacity)
# fig = px.area(unpivoted_df_sorted_cap, y='cumsum', x='t', title='Kumulierte Co2-Emissionen [t]', color='i', color_discrete_map=color_dict_with_capacity)
# Update traces
fig.update_traces(line=dict(width=0))
fig.for_each_trace(lambda trace: trace.update(fillcolor=trace.line.color))
# fig.show()
with colb2:
    fig
# %%
# plot investment costs
# c_inv_i to dataframe
if c_inv_i.name is None:
    c_inv_i.name = 'c_inv_i'
c_inv_i_df = c_inv_i.to_dataframe().reset_index()
# multiply new capacities with the specific investment costs
df_invest_costs = df_new_capacities['K'] * c_inv_i
df_invest_costs = df_invest_costs.to_frame()
df_invest_costs.columns = ['K']
df_invest_costs['i'] = df_new_capacities['i']
fig = px.bar(df_invest_costs, y='i', x='K', orientation='h', title='Investitionskosten [Mrd. €]', color='i', color_discrete_map=color_dict)
# fig.show()
with colb1:
    fig
# %%
df_production_all = m.solution['y'].sel(i=i).to_dataframe().reset_index()
# Contribution margin = revenues - costs
df_contr_marg = m.constraints['max_cap'].dual.to_dataframe().reset_index()
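# The dual of the capacity constraint 'max_cap' is used here as the per-time-step scarcity rent
# of capacity; summing production times this dual gives the contribution margin per technology.
# For renewables it is computed instead from production times the electricity price (dual of 'load').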
# # contribution margin only for i_with_capacity
# df_contr_marg = df_contr_marg[df_contr_marg['i'].isin(i_with_capacity)].reset_index(drop=True)
df_merged = pd.merge(df_production_all, df_contr_marg, on=['t', 'i'])
# multiply production with the dual of the capacity constraint
df_merged['y_new'] = df_merged['y'] * df_merged['dual']
df_merged = df_merged[['t', 'i', 'y_new']]
df_contr_marg_sum = df_merged.groupby('i')['y_new'].sum().reset_index()
df_production_res = m.solution['y'].sel(i=iRes).to_dataframe().reset_index()
df_price_res = m.constraints['load'].dual.to_dataframe().reset_index()
# multiply renewable production with the electricity price
df_merged_res = pd.merge(df_production_res, df_price_res, on='t')
df_merged_res['multiplied_value'] = df_merged_res['y'] * df_merged_res['dual']
df_merged_res = df_merged_res[['t', 'i', 'multiplied_value']]
df_contr_marg_res = df_merged_res.groupby('i')['multiplied_value'].sum().reset_index()
df_contr_marg_res['multiplied_value'] = df_contr_marg_res['multiplied_value'] * -dt
df_contr_marg_sum = pd.merge(df_contr_marg_sum, df_contr_marg_res, on='i', how='left')
df_contr_marg_sum['y_new'] = df_contr_marg_sum['multiplied_value'].combine_first(df_contr_marg_sum['y_new'])
df_contr_marg_sum = df_contr_marg_sum.drop(columns=['multiplied_value'])
df_contr_marg_sum['y'] = df_contr_marg_sum['y_new'] * (-1)
# rearrange rows according to i
df_contr_marg_sum = df_contr_marg_sum.set_index('i').loc[i].reset_index()
# bar plot
fig = px.bar(df_contr_marg_sum, y='i', x='y', orientation='h', title='Deckungsbeitrag [Mrd. €]', color='i', color_discrete_map=color_dict)
# fig.show()
with colb2:
    fig
# %%
# # Add pie chart of total production per technology type in GWh (divide by 1000)
# df_production_sum = (df_production.groupby('i')['y'].sum() * dt / 1000).round(0).sort_values(ascending=False).reset_index()
# fig = px.pie(df_production_sum, names="i", values='y', title='Gesamtproduktion [GWh] als Kuchendiagramm',
#              color='i', color_discrete_map=color_dict)
# with colb2:
#     fig
# %%
# df_h2_prod = m.solution['y_h2'].sel(i = iPtG).to_dataframe().reset_index()
# fig = px.area(m.solution['y_h2'].sel(i = iPtG).to_dataframe().reset_index(), y='y_h2', x='t', title='Produktion Wasserstoff [MWh_th]', color='i', color_discrete_map=color_dict)
# fig.update_traces(line=dict(width=0))
# fig.for_each_trace(lambda trace: trace.update(fillcolor = trace.line.color))
# with colb2:
#     fig
# %%
# # Add pie chart which shows new capacities
# # round number of new capacities
# df_new_capacities_rounded = m.solution['K'].round(0).to_dataframe()
# # drop all technologies with K <= 0
# df_new_capacities_rounded = df_new_capacities_rounded[df_new_capacities_rounded["K"] > 0].reset_index()
# total_k_sum = df_new_capacities_rounded["K"].sum()
# # df_new_capacities_rounded["percentage"] = df_new_capacities_rounded["K"].apply(lambda x: (x/total_k_sum)*100).abs().round(2)
# fig = px.pie(df_new_capacities_rounded, names='i', values='K', title='Neu installierte Kapazitäten [MW] als Kuchendiagramm',
#              color='i', color_discrete_map=color_dict)
# with colb1:
#     fig
# Total CO2 emissions of the solution (bare expression, displayed via Streamlit magic)
((m.solution['y'] / eff_i) * co2_factor_i * dt).sum()
# %%
from io import BytesIO
# from pyxlsb import open_workbook as open_xlsb
import xlsxwriter
# %%
output = BytesIO()
def disaggregate_df(df):
    """Map a result dataframe from the aggregated time axis back to the original hourly index."""
    if "t" not in df.columns:
        return df
    # df_repeated = df.iloc[idx_repeat, :].reset_index(drop=True).drop('t', axis=1)
    df_t_all = pd.DataFrame({"t_all": t_original.to_series(), 't': t.repeat(dt)}).reset_index(drop=True)
    df_output = df.merge(df_t_all, on='t').drop('t', axis=1).rename({'t_all': 't'}, axis=1)
    # move the last column (the original time index) to the front
    cols = list(df_output.columns)
    cols = [cols[-1]] + cols[:-1]
    df_output = df_output[cols]
    return df_output.sort_values('t')
# Create a Pandas Excel writer using XlsxWriter as the engine
with pd.ExcelWriter(output, engine='xlsxwriter') as writer:
    # Write each DataFrame to a different sheet
    disaggregate_df(df_total_costs).to_excel(writer, sheet_name='Gesamtkosten', index=False)
    disaggregate_df(df_CO2_price).to_excel(writer, sheet_name='CO2 Preis', index=False)
    disaggregate_df(df_price).to_excel(writer, sheet_name='Preise', index=False)
    # disaggregate_df(df_contr_marg).to_excel(writer, sheet_name='Deckungsbeiträge', index=False)
    disaggregate_df(df_new_capacities).to_excel(writer, sheet_name='Kapazitäten', index=False)
    disaggregate_df(df_production).to_excel(writer, sheet_name='Produktion', index=False)
    disaggregate_df(df_charging).to_excel(writer, sheet_name='Ladevorgänge', index=False)
    disaggregate_df(D_t.to_dataframe().reset_index()).to_excel(writer, sheet_name='Nachfrage', index=False)
    disaggregate_df(df_curtailment).to_excel(writer, sheet_name='Abregelung', index=False)
    # disaggregate_df(df_h2_prod).to_excel(writer, sheet_name='H2 produktion', index=False)
with col4:
    st.download_button(
        label="Download Excel Arbeitsmappe Ergebnisse",
        data=output.getvalue(),
        file_name="Arbeitsmappe_Ergebnisse.xlsx",
        mime="application/vnd.ms-excel"
    )
# %%