# MediaMixOptimization/pages/4_AI_Model_Build.py
import streamlit as st
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from data_analysis import format_numbers
import numpy as np
import pickle
import time
import itertools
import statsmodels.api as sm
import re
from sklearn.metrics import mean_absolute_error, r2_score
from copy import copy
# from sklearn.metrics import mean_absolute_percentage_error
from sklearn.preprocessing import MaxAbsScaler
import os
import matplotlib.pyplot as plt
from statsmodels.stats.outliers_influence import variance_inflation_factor
import statsmodels.formula.api as smf
from data_prep import *
import sqlite3
from utilities import (
set_header,
load_local_css,
update_db,
project_selection,
retrieve_pkl_object,
)
from datetime import datetime, timedelta
import shutil
from post_gres_cred import db_cred
from constants import (
MAX_COMBINATIONS,
MIN_MODEL_NAME_LENGTH,
MIN_P_VALUE_THRESHOLD,
MODEL_POS_COEFF_RATIO_THRESHOLD,
MODEL_P_VALUE_RATIO_THRESHOLD,
MAX_TOP_FEATURES,
MAX_NUM_FILTERS,
DEFAULT_FILTER_VALUE,
VIF_LOW_THRESHOLD,
VIF_HIGH_THRESHOLD,
DEFAULT_TRAIN_RATIO,
)
from log_application import log_message
import sys, traceback
import warnings

schema = db_cred["schema"]

# Suppress all warnings
warnings.filterwarnings("ignore")
st.set_option("deprecation.showPyplotGlobalUse", False)
## DEFINE ALL FUNCTIONS
# Function to save all session state variables to save progress
def save_session(X_train_orig, test_spends, spends_data):
try:
st.session_state["X_train"] = X_train_orig
st.session_state["X_test_spends"] = test_spends
st.session_state["spends_data"] = spends_data
st.session_state["project_dct"]["model_build"]["session_state_saved"] = {}
for key in [
"Model",
"bin_dict",
"used_response_metrics",
"date",
"saved_model_names",
"media_data",
"X_test_spends",
"spends_data",
"model_results_data",
"model_results_df",
"coefficients_df",
]:
st.session_state["project_dct"]["model_build"]["session_state_saved"][
key
] = st.session_state[key]
project_pkl = pickle.dumps(st.session_state["project_dct"]) # db
update_db(
st.session_state["project_number"],
"Model_Build",
"project_dct",
project_pkl,
schema,
# resp_mtrc=None,
) # db
log_message("info", "Session Saved", "Model Build")
st.toast("Session Saved")
except Exception:
# Capture the error details
exc_type, exc_value, exc_traceback = sys.exc_info()
error_message = "".join(
traceback.format_exception(exc_type, exc_value, exc_traceback)
)
log_message(
"error",
f"Error while saving session: {error_message}",
"Model Build",
)
st.warning("An error occured, please try again", icon="⚠️")
def contains_sql_keywords_check(user_input):
sql_keywords = [
"SELECT",
"INSERT",
"UPDATE",
"DELETE",
"DROP",
"ALTER",
"CREATE",
"GRANT",
"REVOKE",
"UNION",
"JOIN",
"WHERE",
"HAVING",
"EXEC",
"TRUNCATE",
"REPLACE",
"MERGE",
"DECLARE",
"SHOW",
"FROM",
]
pattern = "|".join(re.escape(keyword) for keyword in sql_keywords)
return re.search(pattern, user_input, re.IGNORECASE)
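# Illustration (assumed inputs, not part of the app flow): the pattern matches
# substrings, so contains_sql_keywords_check("drop table users") is truthy, and
# so is contains_sql_keywords_check("updated_model") (it contains "UPDATE"),
# while contains_sql_keywords_check("my_model_v1") returns None.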
# Function to save selected model and session state variables
def save_model(
mod_name, model, X_test, X_train_orig, y_test, y_train, test_spends, spends_data
):
try:
if len(mod_name) > MIN_MODEL_NAME_LENGTH:
mod_name = mod_name + "__" + target_col
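# Naming sketch: a user-entered name "v1" for target "revenue" is stored as
# "v1__revenue"; the success message below splits it back on "__".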
if contains_sql_keywords_check(mod_name):
st.warning(
"Input contains SQL keywords. Please avoid using SQL commands.",
icon="⚠️",
)
st.stop()
else:
pass
if is_panel:
random_eff_df = get_random_effects(media_data, panel_col, model)
st.session_state["random_effects"] = random_eff_df
pred_train = model.fittedvalues
pred_test = mdf_predict(X_test, model, random_eff_df)
else:
st.session_state["features_set"] = st.session_state["features_set"] + [
"const"
]
pred_train = model.predict(
X_train_orig[st.session_state["features_set"]]
)
pred_test = model.predict(X_test[st.session_state["features_set"]])
st.session_state["Model"][mod_name] = {
"Model_object": model,
"feature_set": st.session_state["features_set"],
"X_train": X_train_orig,
"X_test": X_test,
"y_train": y_train,
"y_test": y_test,
"pred_train": pred_train,
"pred_test": pred_test,
}
best_model = pickle.dumps(st.session_state["Model"]) # db
update_db(
prj_id=st.session_state["project_number"],
page_nam="Model_Build",
file_nam="best_models",
pkl_obj=best_model,
schema=schema,
# resp_mtrc=None,
) # db
st.success(
"Model "
+ mod_name.split("__")[0]
+ " for "
+ mod_name.split("__")[1]
+ " saved! Proceed to the next page to tune the model"
)
log_message(
"info",
f'Model saved - Model name: {mod_name.split("__")[0]}, Response Metric: {mod_name.split("__")[1]}, Train Start: {st.session_state.train_start_inp}, Test Start: {st.session_state.test_start_inp}',
"Model Build",
)
urm = st.session_state["used_response_metrics"]
urm.append(sel_target_col)
st.session_state["used_response_metrics"] = list(set(urm))
mod_name = ""
st.session_state["saved_model_names"].append(mod_name)
save_session(X_train_orig, test_spends, spends_data)
# else:
# st.warning("Please enter model name")
# # st.stop()
except Exception:
# Capture the error details
exc_type, exc_value, exc_traceback = sys.exc_info()
error_message = "".join(
traceback.format_exception(exc_type, exc_value, exc_traceback)
)
log_message(
"error", f"Error while saving model: {error_message}", "Model Build"
)
st.warning("An error occured, please try again", icon="⚠️")
# # Function to calculate Symmetric Mape
# def smape(actual,forecast):
# # Symmetric Mape (SMAPE) eliminates shortcomings of MAPE :
# ## 1. MAPE becomes insanely high when actual is close to 0
# ## 2. MAPE is more favourable to underforecast than overforecast
# return (1/len(actual)) * np.sum(1 * np.abs(forecast - actual) / (np.abs(actual) + np.abs(forecast)))
def wmape(actual, forecast):
# Weighted MAPE (WMAPE) eliminates the following shortcomings of MAPE & SMAPE
## 1. MAPE becomes insanely high when actual is close to 0
## 2. MAPE is more favourable to underforecast than overforecast
## 3. SMAPE is lower since it has a bigger denominator (actual+forecast).
## 4. Alternate formulas of SMAPE include multiplying the numerator by 2, making the range 0-200%
return np.sum(np.abs(actual - forecast)) / np.sum(np.abs(actual))
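# Worked example (assumed values): actual = [100, 200], forecast = [110, 180]
# gives WMAPE = (|100 - 110| + |200 - 180|) / (100 + 200) = 30 / 300 = 0.10.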
def to_percentage(value):
return f"{value * 100:.1f}%"
# Prepare the model results to display
@st.cache_resource(show_spinner=False)
def prepare_data_df(data):
# Process the 'coefficients' column, replacing each feature name with its channel name
@st.cache_resource(show_spinner=False)
def process_dict(d):
channels_saved = st.session_state["project_dct"]["data_import"][
"group_dict"
] # db
channels = copy(channels_saved)
for ex_var in st.session_state["bin_dict"]["Exogenous"]:
channels[ex_var] = [ex_var]
raw_vars = []
for vars in channels.values():
raw_vars = raw_vars + vars
new_dict = {}
for key, value in d.items():
if key == "const" or key.lower() == "intercept":
new_key = "Base Sales"
else:
raw_var = [var for var in raw_vars if var in key][0]
new_key = [
channel
for channel, raw_vars in channels.items()
if raw_var in raw_vars
][0]
new_dict[new_key] = value
return new_dict
# data = data[data["pos_count"] == data["pos_count"].max()].reset_index(
# drop=True
# ) # Sprint4 -- Srishti -- only show models with the lowest num of neg coeffs
data.sort_values(by=["ADJR2"], ascending=False, inplace=True)
data.drop_duplicates(subset="Model_iteration", inplace=True)
# Applying the function to each row in the DataFrame
data["coefficients"] = data["coefficients"].apply(process_dict)
# Convert dictionary items into separate DataFrame columns
coefficients_df = data["coefficients"].apply(pd.Series)
# Rename the columns to remove any trailing underscores and capitalize the words
coefficients_df.columns = [
col.strip("_").replace("_", " ").title() for col in coefficients_df.columns
]
# Normalize each row so that the sum equals 100%
coefficients_df = coefficients_df.apply(
lambda x: round((x / x.sum()) * 100, 2), axis=1
)
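# e.g. raw coefficients {"TV": 2, "Search": 6} become {"TV": 25.0, "Search": 75.0}
# (illustrative values; each row is rescaled to sum to 100%).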
# Join the new columns back to the original DataFrame
data = data.join(coefficients_df)
data_df = data[
[
"Model_iteration",
"MAPE",
"ADJR2",
"R2",
"Total Positive Contributions",
"Significance",
]
+ list(coefficients_df.columns)
]
data_df.rename(
columns={
"Model_iteration": "Model Iteration",
"ADJR2": "Adj. R-squared",
"R2": "R-squared",
},
inplace=True,
)
data_df.insert(0, "Rank", range(1, len(data_df) + 1))
return coefficients_df, data_df
# Formatting Text
def format_display(inp):
return inp.title().replace("_", " ").strip()
# Get random effect from MixedLM Model
def get_random_effects(media_data, panel_col, _mdf):
# create an empty dataframe
random_eff_df = pd.DataFrame(columns=[panel_col, "random_effect"])
# Iterate over all panel values and add to dataframe
for i, market in enumerate(media_data[panel_col].unique()):
intercept = _mdf.random_effects[market].values[0]
random_eff_df.loc[i, "random_effect"] = intercept
random_eff_df.loc[i, panel_col] = market
return random_eff_df
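# Illustration of the returned frame (hypothetical panel values):
#   panel  random_effect
#   "us"            12.3
#   "uk"            -4.1
# i.e. one fitted random intercept per panel.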
# Predict on df using MixedLM model
def mdf_predict(X_df, mdf, random_eff_df):
# Create a copy of the input df and predict using the MixedLM model, i.e. the fixed-effect component
X = X_df.copy()
X["fixed_effect"] = mdf.predict(X)
# Merge random effects
X = pd.merge(X, random_eff_df, on=panel_col, how="left")
# Get final predictions by adding random effect to fixed effect
X["pred"] = X["fixed_effect"] + X["random_effect"]
# Drop intermediate columns
X.drop(columns=["fixed_effect", "random_effect"], inplace=True)
return X["pred"]
# Calculate number of combinations for channels with number of top features (n)
def count_combinations(channels, n):
num_combinations = 1
for channel, vars in channels.items():
num_combinations *= len(vars) * n
return num_combinations
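# Worked example (assumed inputs): channels = {"tv": ["tv_a", "tv_b"],
# "search": ["s_a"]} with n = 2 gives (2 * 2) * (1 * 2) = 8 combinations.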
# Reset Model results
def reset_model_result_dct():
st.session_state["Model_results"] = {
# "Model_object": [],
"Model_iteration": [],
"Feature_set": [],
"MAPE": [],
"R2": [],
"ADJR2": [],
"pos_count": [],
"coefficients": [],
"Total Positive Contributions": [],
"Significance": [],
}
if "filter_df_base" in st.session_state:
del st.session_state.filter_df_base
def build_model(
iterations,
is_panel,
train_idx,
target_col,
panel_col,
media_data,
current_iteration,
):
st.session_state["project_dct"]["model_build"]["build_button"] = True
st.session_state["iterations"] = iterations
# st.session_state["media_data"] = media_data
st.session_state["media_data"] = st.session_state["media_data"].ffill()
progress_bar = st.progress(0)
start_time = time.time()
progress_text = st.empty()
if int(iterations) < current_iteration:
current_iteration = int(iterations)
i = current_iteration
for selected_features in st.session_state["final_selection"][
current_iteration : int(iterations)
]:
# if st.session_state.run_model_build is False:
# return
if st.session_state.run_model_build:
df = st.session_state["media_data"]
fet = [var for var in selected_features if len(var) > 0]
inp_vars_str = " + ".join(fet)
X = df[fet]
y = df[target_col]
ss = MaxAbsScaler()
X = pd.DataFrame(ss.fit_transform(X), columns=X.columns)
if is_panel:
X[target_col] = y
X[panel_col] = df[panel_col]
X_train = X.iloc[:train_idx]
X_test = X.iloc[train_idx:]
y_train = y.iloc[:train_idx]
y_test = y.iloc[train_idx:]
md_str = target_col + " ~ " + inp_vars_str
md = smf.mixedlm(
md_str, data=X_train[[target_col] + fet], groups=X_train[panel_col]
)
mdf = md.fit()
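# Sketch of the model just fit: y = X @ beta (fixed effects) + u_panel
# (a random intercept per panel group) + noise.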
coefficients = mdf.fe_params.to_dict()
predicted_values = mdf.fittedvalues
else:
X = sm.add_constant(X)
X_train = X.iloc[:train_idx]
X_test = X.iloc[train_idx:]
y_train = y.iloc[:train_idx]
y_test = y.iloc[train_idx:]
model = sm.OLS(y_train, X_train).fit()
coefficients = model.params.to_dict()
predicted_values = model.predict(X_train)
model_positive = [
col for col in coefficients.keys() if coefficients[col] > 0
]
pvalues = [
var
for var in list(mdf.pvalues if is_panel else model.pvalues)
if var <= MIN_P_VALUE_THRESHOLD
]
if (
len(model_positive) / (len(selected_features) + 1)
) > MODEL_POS_COEFF_RATIO_THRESHOLD and (
len(pvalues) / (len(selected_features) + 1)
) >= MODEL_P_VALUE_RATIO_THRESHOLD:
# mape = mean_absolute_percentage_error(y_train, predicted_values)
mape = wmape(y_train, predicted_values)
r2 = r2_score(y_train, predicted_values)
adjr2 = 1 - (1 - r2) * (len(y_train) - 1) / (
len(y_train) - len(selected_features) - 1
)
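# Adjusted R^2 = 1 - (1 - R^2) * (n - 1) / (n - k - 1), with n train rows and
# k = len(selected_features); it penalizes features that add little fit.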
st.session_state["Model_results"]["Model_iteration"].append(i)
st.session_state["Model_results"]["Feature_set"].append(fet)
st.session_state["Model_results"]["MAPE"].append(mape)
st.session_state["Model_results"]["R2"].append(r2)
st.session_state["Model_results"]["pos_count"].append(
len(model_positive)
)
st.session_state["Model_results"][
"Total Positive Contributions"
].append(f"{len(model_positive)} / {len(selected_features) + 1}")
st.session_state["Model_results"]["Significance"].append(
f"{len(pvalues)} / {len(selected_features) + 1}"
)
st.session_state["Model_results"]["ADJR2"].append(adjr2)
st.session_state["Model_results"]["coefficients"].append(coefficients)
resume = pickle.dumps(
{
"iterations": iterations,
"is_panel": is_panel,
"train_idx": train_idx,
"target_col": target_col,
"panel_col": panel_col,
"media_data": st.session_state["media_data"],
"current_iteration": current_iteration,
}
) # db
current_time = time.time()
time_taken = current_time - start_time
time_elapsed_minutes = time_taken / 60
completed_iterations_text = f"{i + 1}/{int(iterations)}"
current_progress = int((i + 1) / int(iterations) * 100)
progress_bar.progress(current_progress)
progress_text.text(f"Completed iterations: {completed_iterations_text}")
if i % 5000 == 0 and i > 0:
update_db(
prj_id=st.session_state["project_number"],
page_nam="Model_Build",
file_nam="resume",
pkl_obj=resume,
schema="",
# resp_mtrc=None,
) # db
current_iteration += 1
i += 1
update_db(
st.session_state["project_number"],
"Model_Build",
"resume",
resume,
schema,
# resp_mtrc=None,
) # db
st.session_state.run_model_build = False
st.write(
    f'Out of {st.session_state["iterations"]} iterations: {len(st.session_state["Model_results"]["Model_iteration"])} valid models'
)
return st.session_state["Model_results"]
def load_model_results(iterations, is_panel, train_idx, target_col, panel_col):
# model_results_path = os.path.join(save_path, "resume.pkl")
resume = retrieve_pkl_object(
st.session_state["project_number"], "Model_Build", "resume", schema
)
if resume is not None and resume["iterations"] > 0:
if resume["current_iteration"] == (int(resume["iterations"]) - 1):
media_data = st.session_state.media_data
current_iteration = 0
st.session_state.run_model_build = False
load_model_results_file()
else:
st.session_state.run_model_build = True
(
iterations,
is_panel,
train_idx,
target_col,
panel_col,
media_data,
current_iteration,
) = (
resume["iterations"],
resume["is_panel"],
resume["train_idx"],
resume["target_col"],
resume["panel_col"],
resume["media_data"],
resume["current_iteration"],
)
load_model_results_file()
else:
media_data = st.session_state.media_data
current_iteration = 0
st.session_state.run_model_build = False
reset_model_result_dct()
return (
iterations,
is_panel,
train_idx,
target_col,
panel_col,
media_data,
current_iteration,
)
def stop_all_models():
try:
resume = pickle.dumps({"iterations": -1})
if "Model_results" in st.session_state:
del st.session_state["Model_results"]
st.session_state.run_model_build = False
update_db(
st.session_state["project_number"],
"Model_Build",
"resume",
resume,
schema,
# resp_mtrc=None,
) # db
log_message(
"info",
"Model build cancelled",
"Model Build",
)
except Exception:
# Capture the error details
exc_type, exc_value, exc_traceback = sys.exc_info()
error_message = "".join(
traceback.format_exception(exc_type, exc_value, exc_traceback)
)
log_message(
"error",
f"Error while cancelling model build: {error_message}",
"Model Build",
)
def load_model_results_file():
# model_results_file_path = os.path.join(save_path, "Model_results.pkl")
model_results = retrieve_pkl_object(
st.session_state["project_number"], "Model_Build", "Model_results", schema
)
if model_results is not None:
st.session_state.Model_results = model_results
st.set_page_config(
page_title="Model Build",
page_icon=":shark:",
layout="wide",
initial_sidebar_state="collapsed",
)
st.markdown(
"""
<style>
button.step-up {display: none;}
button.step-down {display: none;}
div[data-baseweb] {border-radius: 4px;}
</style>""",
unsafe_allow_html=True,
)
load_local_css("styles.css")
set_header()
# Add session state variables
if "emp_id" not in st.session_state:
st.session_state["emp_id"] = None
if "project_name" not in st.session_state:
st.session_state["project_name"] = None
if "project_dct" not in st.session_state:
project_selection()
st.stop()
# if "orig_media_data" not in st.session_state:
# st.session_state["orig_media_data"] = pd.DataFrame()
try:
# Check authentication
if "emp_id" in st.session_state and st.session_state["emp_id"] is not None:
# Display Current project name
cols1 = st.columns([2, 1])
with cols1[0]:
st.markdown(f"**Welcome {st.session_state['username']}**")
with cols1[1]:
st.markdown(f"**Current Project: {st.session_state['project_name']}**")
st.title("AI Model Build")
# If data import output is not present, ask the user to move to the Data Import page
if (
st.session_state["project_dct"]["data_import"]["category_dict"] is None
): # db
st.error("Please move to Data Import Page and save.")
st.stop()
st.session_state["bin_dict"] = st.session_state["project_dct"]["data_import"][
"category_dict"
] # db
# If Transformation output is not present, ask the user to move to the Transformation page
if st.session_state["project_dct"]["transformations"]["final_df"] is None:
st.error("Please move to Transformation Page and save transformations.")
log_message(
"warning",
"Transformations not found.",
"Model Build",
)
st.stop() # db
media_data = st.session_state["project_dct"]["transformations"][
"final_df"
].copy() # db
# Initialize session state variables
st.session_state["media_data"] = media_data
st.session_state["available_response_metrics"] = st.session_state["bin_dict"][
"Response Metrics"
]
if "is_tuned_model" not in st.session_state:
st.session_state["is_tuned_model"] = {}
for resp_metric in st.session_state["available_response_metrics"]:
resp_metric = (
resp_metric.lower()
.replace(" ", "_")
.replace("-", "")
.replace(":", "")
.replace("__", "_")
)
st.session_state["is_tuned_model"][resp_metric] = False
if "used_response_metrics" not in st.session_state:
st.session_state["used_response_metrics"] = []
if "saved_model_names" not in st.session_state:
st.session_state["saved_model_names"] = []
if "Model" not in st.session_state:
if (
"session_state_saved"
in st.session_state["project_dct"]["model_build"].keys()
and st.session_state["project_dct"]["model_build"][
"session_state_saved"
]
is not None
and "Model"
in st.session_state["project_dct"]["model_build"][
"session_state_saved"
].keys()
):
st.session_state["Model"] = st.session_state["project_dct"][
"model_build"
]["session_state_saved"]["Model"]
else:
st.session_state["Model"] = {}
if "Model_results" not in st.session_state:
st.session_state["Model_results"] = {
"Model_iteration": [],
"Feature_set": [],
"MAPE": [],
"R2": [],
"ADJR2": [],
"pos_count": [],
"coefficients": [],
"Total Positive Contributions": [],
"Significance": [],
}
if "iterations" not in st.session_state:
st.session_state["iterations"] = 0
if "final_selection" not in st.session_state:
st.session_state["final_selection"] = False
if "model_results_df" not in st.session_state:
if (
    st.session_state["project_dct"]["model_build"].get("session_state_saved")
    is not None
    and "model_results_df"
    in st.session_state["project_dct"]["model_build"][
        "session_state_saved"
    ].keys()
):
st.session_state["model_results_df"] = st.session_state["project_dct"][
"model_build"
]["session_state_saved"]["model_results_df"]
else:
st.session_state["model_results_df"] = {}
if "model_results_data" not in st.session_state:
if (
    st.session_state["project_dct"]["model_build"].get("session_state_saved")
    is not None
    and "model_results_data"
    in st.session_state["project_dct"]["model_build"][
        "session_state_saved"
    ].keys()
):
st.session_state["model_results_data"] = st.session_state[
"project_dct"
]["model_build"]["session_state_saved"]["model_results_data"]
else:
st.session_state["model_results_data"] = {}
if "coefficients_df" not in st.session_state:
if (
    st.session_state["project_dct"]["model_build"].get("session_state_saved")
    is not None
    and "coefficients_df"
    in st.session_state["project_dct"]["model_build"][
        "session_state_saved"
    ].keys()
):
st.session_state["coefficients_df"] = st.session_state["project_dct"][
"model_build"
]["session_state_saved"]["coefficients_df"]
else:
st.session_state["coefficients_df"] = {}
panel_col = "panel"
is_panel = media_data["panel"].nunique() > 1
date_col = "date"
media_data[date_col] = pd.to_datetime(media_data[date_col])
if is_panel:
unique_panels = media_data[panel_col].unique()
panel_group = (
media_data.groupby(panel_col)
.agg({date_col: ["min", "max", "count"]})
.reset_index()
)
panel_group.columns = [panel_col, "min", "max", "count"]
if (
(panel_group["min"].nunique() > 1)
or (panel_group["max"].nunique() > 1)
or (panel_group["count"].nunique() > 1)
):
st.warning(
    "Panels do not have 100% date overlap. Please aggregate the data or create a separate project for each panel."
)
st.stop()
if is_panel:
media_data.sort_values([date_col, panel_col], inplace=True)
media_data.reset_index(drop=True, inplace=True)
else:
media_data.sort_values([date_col], inplace=True)
media_data.reset_index(drop=True, inplace=True)
date = media_data[date_col]
# Read previously selected response metric for the user (persistence)
default_target_idx = (
    st.session_state["project_dct"]["model_build"].get("sel_target_col")
    or st.session_state["available_response_metrics"][0]
)
start_cols = st.columns([0.35, 0.2, 0.2, 0.25])
min_date = min(date)
max_date = max(date)
# Select Response Metric
with start_cols[0]:
sel_target_col = st.selectbox(
"Select the response metric",
st.session_state["available_response_metrics"],
index=st.session_state["available_response_metrics"].index(
default_target_idx
),
format_func=format_display,
)
st.session_state["project_dct"]["model_build"][
"sel_target_col"
] = sel_target_col
default_test_start = min_date + (DEFAULT_TRAIN_RATIO * (max_date - min_date))
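# e.g., assuming (for illustration) DEFAULT_TRAIN_RATIO = 0.75 and a Jan-Dec
# timeline, the default test start falls roughly at the start of October.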
# Select Train Start Date
with start_cols[1]:
train_start = st.date_input(
"Select train start date",
min_date,
min_value=min_date,
max_value=max_date,
key='train_start_inp'
)
# Select Test Start Date
with start_cols[2]:
test_start = st.date_input(
"Select test start date",
default_test_start,
min_value=min_date,
max_value=max_date,
key='test_start_inp'
)
# select n for top n transformations
with start_cols[3]:
top_n = st.selectbox("Number of Transformations per variable:", [1, 2, 3, 4, 5], index=1)
media_data = media_data[
media_data[date_col] >= pd.to_datetime(train_start)
].reset_index(drop=True)
if is_panel:
media_data.sort_values([date_col, panel_col], inplace=True)
media_data.reset_index(drop=True, inplace=True)
else:
media_data.sort_values([date_col], inplace=True)
media_data.reset_index(drop=True, inplace=True)
st.session_state["media_data"] = media_data
date = media_data[date_col]
st.session_state["date"] = date
train_idx = media_data[
media_data[date_col] >= pd.to_datetime(test_start)
].index[0]
st.session_state["train_idx"] = train_idx
# Format columns
target_col = (
sel_target_col.lower()
.replace(" ", "_")
.replace("-", "")
.replace(":", "")
.replace("__", "_")
)
new_name_dct = {
    col: col.lower()
    .replace(".", "_")
    .replace("@", "_")
    .replace(" ", "_")
    .replace("-", "")
    .replace(":", "")
    .replace("__", "_")
    for col in media_data.columns
}
new_name_dct_inv = {v: k for k, v in new_name_dct.items()}
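# e.g. a column "Paid Search: Clicks@lag1" is sanitized to
# "paid_search_clicks_lag1"; new_name_dct_inv maps it back to the original.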
# filename = os.path.join(st.session_state["project_path"], "transformed_cols_name_dct.pkl")
# with open(filename, "wb") as f:
# pickle.dump(new_name_dct_inv, f) #db?
media_data.columns = [new_name_dct[col] for col in media_data.columns]
if "is_panel" not in st.session_state:
st.session_state["is_panel"] = is_panel
media_data.reset_index(drop=True, inplace=True)
st.session_state["media_data"] = media_data
y = media_data[target_col]
y.reset_index(drop=True, inplace=True)
# Create spends data
if is_panel:
spends_data = media_data[
st.session_state["bin_dict"]["Spends"] + [date_col, panel_col]
]
else:
spends_data = media_data[
st.session_state["bin_dict"]["Spends"] + [date_col]
]
channels_saved = st.session_state["project_dct"]["data_import"][
"group_dict"
] # db
channels = copy(channels_saved)
for channel in channels.keys():
channels[channel] = [
var
for var in channels[channel]
if var not in st.session_state["bin_dict"]["Spends"]
]
for ex_var in st.session_state["bin_dict"]["Exogenous"]:
channels[ex_var] = [ex_var]
bucket = list(channels.keys())
# Create combinations of variables
top_correlated_features = []
original_cols = (
st.session_state["bin_dict"]["Media"]
+ st.session_state["bin_dict"]["Exogenous"]
)
original_cols = [
col.lower()
.replace(".", "_")
.replace("@", "_")
.replace(" ", "_")
.replace("-", "")
.replace(":", "")
.replace("__", "_")
for col in original_cols
]
original_cols = [
col for col in original_cols if "_cost" not in col and "_spend" not in col
]
# Select number of top features
num_top_feats = top_n
# for i in range(MAX_TOP_FEATURES, 0, -1):
# if count_combinations(channels, i) <= MAX_COMBINATIONS:
# num_top_feats = i
# break
# select top features of each column
for col in original_cols:
transformed_cols = [
x
for x in st.session_state["media_data"].columns
if new_name_dct_inv[x].split("@")[0].lower() == col
]
corr_df = (
pd.concat([st.session_state["media_data"][transformed_cols], y], axis=1)
.corr()[target_col]
.iloc[:-1]
)
# Determine if all index labels in corr_df start with 'media'
if all(col.startswith("media") for col in corr_df.index):
# If all values start with 'media', do not take the absolute value
sorted_index = (
corr_df.sort_values(ascending=False).head(num_top_feats).index
)
else:
# If not all values start with 'media', take the absolute value before sorting
sorted_index = (
corr_df.abs().sort_values(ascending=False).head(num_top_feats).index
)
top_correlated_features.append(list(sorted_index))
flattened_list = [
    item for sublist in top_correlated_features for item in sublist
]
all_features_set = {
var: [
col
for col in flattened_list
if new_name_dct_inv[col].split("@")[0] in channels[var]
]
for var in bucket
}
channels_all = [values for values in all_features_set.values()]
st.session_state["combinations"] = list(itertools.product(*channels_all))
st.session_state["final_selection"] = st.session_state["combinations"]
if st.session_state["final_selection"]:
st.write(
f'Total combinations created {format_numbers(len(st.session_state["final_selection"]))}'
)
if len(st.session_state["final_selection"]) == 0:
log_message(
"warning",
"Combinations not created",
"Model Build",
)
end_date = test_start - timedelta(days=1)
disp_str = (
"Data Split -- Training Period: "
+ train_start.strftime("%B %d, %Y")
+ " - "
+ end_date.strftime("%B %d, %Y")
+ ", Testing Period: "
+ test_start.strftime("%B %d, %Y")
+ " - "
+ max_date.strftime("%B %d, %Y")
)
st.markdown(disp_str)
col1, col2 = st.columns(2)
if st.checkbox("Build all iterations", key="build_all_check_box"):
iterations = len(st.session_state["final_selection"])
st.session_state["project_dct"]["model_build"]["all_iters_check"] = True
else:
default_model_iters = (
st.session_state["iterations"]
if st.session_state["iterations"]
<= len(st.session_state["final_selection"])
else 1
)
iterations = col1.number_input(
"Select the number of iterations to perform",
min_value=0,
max_value=len(st.session_state["final_selection"]),
value=default_model_iters,
on_change=reset_model_result_dct,
)
st.session_state["project_dct"]["model_build"]["all_iters_check"] = False
st.session_state["project_dct"]["model_build"]["iterations"] = iterations
(
iterations,
is_panel,
train_idx,
target_col,
panel_col,
media_data,
current_iteration,
) = load_model_results(iterations, is_panel, train_idx, target_col, panel_col)
with col2:
st.markdown("##")
build_col, cancel_col = st.columns(2)
if cancel_col.button(
"Cancel Model Building",
on_click=stop_all_models,
args=(),
use_container_width=True,
):
# st.rerun()
st.info("Model Build Cancelled")
if (
build_col.button(
"Start Model Building",
on_click=reset_model_result_dct,
key="model_build_button",
use_container_width=True,
args=(),
)
or st.session_state.run_model_build
):
if iterations < 1:
st.error("Please select number of iterations")
st.stop()
st.session_state.run_model_build = True
try:
build_model(
iterations,
is_panel,
train_idx,
target_col,
panel_col,
media_data,
current_iteration,
)
except Exception:
# Capture the error details
exc_type, exc_value, exc_traceback = sys.exc_info()
error_message = "".join(
traceback.format_exception(exc_type, exc_value, exc_traceback)
)
log_message(
"error",
f"Error while model building: {error_message}",
"Model Build",
)
st.warning("An error has occured, please try again", icon="⚠️")
if len(st.session_state["Model_results"]["Model_iteration"]) > 0:
data = pd.DataFrame(st.session_state["Model_results"])
coefficients_df, data_df = prepare_data_df(data)
data.rename(
columns={"ADJR2": "Adj. R-squared", "R2": "R-squared"}, inplace=True
)
st.session_state["coefficients_df"][target_col] = coefficients_df
st.session_state["model_results_data"][target_col] = data
st.session_state["model_results_df"][target_col] = data_df
# Section 5 - Select Model
st.title("Model Selection")
if (
len(st.session_state["Model_results"]["Model_iteration"]) == 0
and st.session_state["model_results_data"].get(target_col, None) is None
):
st.warning("Click Build Model", icon="⚠️")
show_results_default = (
st.session_state["project_dct"]["model_build"]["show_results_check"]
if st.session_state["project_dct"]["model_build"]["show_results_check"]
is not None
else False
)
if "tick" not in st.session_state:
st.session_state["tick"] = False
if st.session_state["model_results_data"].get(target_col, None) is not None:
st.session_state["project_dct"]["model_build"]["show_results_check"] = True
st.session_state["tick"] = True
st.write(
"Select one model iteration to generate performance metrics for it:"
)
data = st.session_state["model_results_data"][target_col]
data_df = st.session_state["model_results_df"][target_col]
coefficients_df = st.session_state["coefficients_df"][target_col]
st.write("### Filter Results")
st.write(
    "Use the filters below to refine the displayed model results. Filtering screens out models that do not meet the required business criteria, so only the most relevant models are considered for further analysis. If multiple models meet the criteria, select the first one, as it is ranked best by the evaluation criteria."
)
# if "base_value" not in st.session_state:
# st.session_state.base_value = True
# if "filter_df_base" not in st.session_state:
# filter_df_data = {
# "Channel Name": pd.Series([], dtype="str"),
# "Filter Condition": pd.Series([], dtype="str"),
# "Value": pd.Series([], dtype="float64"),
# }
# st.session_state.filter_df_base = pd.DataFrame(filter_df_data)
# st.session_state.base_value = not st.session_state.base_value
# if "filter_df_editable_change" not in st.session_state:
# st.session_state.filter_df_editable_change = False
# def filter_df_editable_change():
# st.session_state.filter_df_editable_change = True
# filter_df_editable = st.data_editor(
# st.session_state.filter_df_base,
# column_config={
# "Channel Name": st.column_config.SelectboxColumn(
# options=list(coefficients_df.columns) + ["MAPE", "R2", "ADJR2"],
# required=True,
# default="Base Sales",
# ),
# "Filter Condition": st.column_config.SelectboxColumn(
# options=[
# "<",
# ">",
# "=",
# "<=",
# ">=",
# ],
# required=True,
# default=">",
# ),
# "Value": st.column_config.NumberColumn(
# required=True,
# default=0.00,
# step=0.001,
# format="%.2f",
# ),
# },
# hide_index=True,
# use_container_width=True,
# num_rows="dynamic",
# on_change=filter_df_editable_change,
# key=f"filter_df_editable_{st.session_state.base_value}",
# )
# Input to select the number of filters
with st.expander("Filter Input"):
num_filters = st.number_input(
"Number of Filters",
min_value=0,
max_value=MAX_NUM_FILTERS,
value=0,
# step=1,
)
filters = {}
# Display the filter inputs dynamically
for i in range(num_filters):
filters[i] = {
"Channel Name": "MAPE",
"Filter Condition": ">",
"Value": DEFAULT_FILTER_VALUE,
}
model_metrics = ["MAPE"]
if "R-squared" in data_df.columns:
model_metrics = ["MAPE", "R-squared", "Adj. R-squared"]
elif "R2" in data_df.columns:
model_metrics = ["MAPE", "R2", "ADJR2"]
cols = st.columns(3)
with cols[0]:
filters[i]["Channel Name"] = st.selectbox(
f"Filter Name {i+1}",
options=list(coefficients_df.columns) + model_metrics,
index=(list(coefficients_df.columns) + model_metrics).index(
filters[i]["Channel Name"]
),
)
with cols[1]:
filters[i]["Filter Condition"] = st.selectbox(
f"Filter Condition {i+1}",
options=["<", ">", "=", "<=", ">="],
index=["<", ">", "=", "<=", ">="].index(
filters[i]["Filter Condition"]
),
)
with cols[2]:
filters[i]["Value"] = st.number_input(
f"Value {i+1}",
value=float(filters[i]["Value"]),
# step=1.0,
format="%.2f",
)
# Convert the filters dictionary to a DataFrame
filter_df_editable = pd.DataFrame.from_dict(filters, orient="index")
st.session_state["filtered_df"] = data_df.copy()
for index, row in filter_df_editable.iterrows():
    channel_name = row["Channel Name"]
    condition = row["Filter Condition"]
    if condition == "=":
        condition = "=="  # pandas.query expects "==" for equality
    value = row["Value"]
    if channel_name in st.session_state["filtered_df"].columns:
        query_string = f"`{channel_name}` {condition} {value}"
        st.session_state["filtered_df"] = st.session_state[
            "filtered_df"
        ].query(query_string)
if st.session_state["filtered_df"].empty:
st.warning("No model meets the specified filter conditions", icon="⚠️")
st.stop()
st.write(
"Select one model iteration to generate performance metrics for it:"
)
display_df = st.session_state.filtered_df.rename(
columns={"Rank": "Model Number"}
)
st.dataframe(display_df, hide_index=True)
min_rank = min(st.session_state["filtered_df"]["Rank"])
max_rank = max(st.session_state["filtered_df"]["Rank"])
available_ranks = st.session_state["filtered_df"]["Rank"].unique()
rank_number = st.number_input(
"Select model by Model Number:",
min_value=min_rank,
max_value=max_rank,
value=min_rank,
# step=1,
)
if rank_number not in available_ranks:
st.warning("No model is available with selected Rank", icon="⚠️")
st.stop()
selected_rows = st.session_state["filtered_df"][
st.session_state["filtered_df"]["Rank"] == rank_number
]
selected_rows = [
(
selected_rows.to_dict(orient="records")[0]
if not selected_rows.empty
else {}
)
]
st.session_state["selected_rows"] = selected_rows
if "Model" not in st.session_state:
st.session_state["Model"] = {}
if len(selected_rows) > 0:
st.header(f"Model Performance for Model Number {rank_number}")
st.markdown(
"This section provides a summary of the regression analysis results. It includes key statistics such as **R-squared**, which indicates how well the model explains the variance in the dependent variable. The table presents the estimated coefficients for the predictors, along with their standard errors, t-values, and p-values, which show the significance of each factor in the model. Overall, this summary helps evaluate the model's performance and the contribution of each predictor to the outcome."
)
features_set = data[
data["Model_iteration"] == selected_rows[0]["Model Iteration"]
]["Feature_set"]
if is_panel:
df = st.session_state["media_data"]
X = df[features_set.values[0] + [target_col]]
fet = features_set.values[0]
y = df[target_col]
ss = MaxAbsScaler()
X = pd.DataFrame(ss.fit_transform(X), columns=X.columns)
X[target_col] = y
X[panel_col] = df[panel_col]
X[date_col] = date
X_train = X.iloc[:train_idx]
X_test = X.iloc[train_idx:].reset_index(drop=True)
y_train = y.iloc[:train_idx]
y_test = y.iloc[train_idx:].reset_index(drop=True)
test_spends = spends_data[train_idx:]
inp_vars_str = " + ".join(fet)
md_str = target_col + " ~ " + inp_vars_str
md = smf.mixedlm(
md_str,
data=X_train[[target_col] + fet],
groups=X_train[panel_col],
)
model = md.fit()
random_eff_df = get_random_effects(
media_data.copy(), panel_col, model
)
train_pred = model.fittedvalues
test_pred = mdf_predict(X_test, model, random_eff_df)
else:
df = st.session_state["media_data"]
X = df[features_set.values[0]]
y = df[target_col]
ss = MaxAbsScaler()
X = pd.DataFrame(ss.fit_transform(X), columns=X.columns)
X = sm.add_constant(X)
X[date_col] = date
X_train = X.iloc[:train_idx]
X_test = X.iloc[train_idx:].reset_index(drop=True)
y_train = y.iloc[:train_idx]
y_test = y.iloc[train_idx:].reset_index(drop=True)
test_spends = spends_data[train_idx:]
model = sm.OLS(
y_train, X_train[features_set.values[0] + ["const"]]
).fit()
train_pred = model.predict(
X_train[features_set.values[0] + ["const"]]
)
test_pred = model.predict(
X_test[features_set.values[0] + ["const"]]
)
st.write(model.summary())
st.header("Actual vs. Predicted Plot (Train)")
st.session_state["X"] = X_train
st.session_state["features_set"] = features_set.values[0]
metrics_table, line, actual_vs_predicted_plot = (
plot_actual_vs_predicted(
X_train[date_col],
y_train,
train_pred,
model,
target_column=sel_target_col,
is_panel=is_panel,
)
)
st.plotly_chart(actual_vs_predicted_plot, use_container_width=True)
st.markdown("## Residual Analysis (Train)")
columns = st.columns(2)
with columns[0]:
st.empty()
fig = plot_residual_predicted(y_train, train_pred, X_train)
st.plotly_chart(fig)
with columns[1]:
# st.empty()
fig = qqplot(y_train, train_pred)
st.plotly_chart(fig)
with columns[0]:
fig = residual_distribution(y_train, train_pred)
st.pyplot(fig)
vif_data = pd.DataFrame()
X_train_orig = X_train.copy()
del_col_list = list(
    set([target_col, date_col, panel_col]).intersection(
        set(X_train.columns)
    )
)
X_train.drop(columns=del_col_list, inplace=True)
vif_data["Variable"] = X_train.columns
vif_data["VIF"] = [
variance_inflation_factor(X_train.values, i)
for i in range(X_train.shape[1])
]
vif_data.sort_values(by=["VIF"], ascending=False, inplace=True)
vif_data = np.round(vif_data)
vif_data["VIF"] = vif_data["VIF"].astype(float)
st.header("Variance Inflation Factor (Train)")
color_mapping = {
"darkgreen": (vif_data["VIF"] < VIF_LOW_THRESHOLD),
"orange": (vif_data["VIF"] >= VIF_LOW_THRESHOLD)
& (vif_data["VIF"] <= VIF_HIGH_THRESHOLD),
"darkred": (vif_data["VIF"] > VIF_HIGH_THRESHOLD),
}
fig, ax = plt.subplots()
fig.set_figwidth(10)
vif_data = vif_data.sort_values(by="VIF", ascending=False)
for color, condition in color_mapping.items():
subset = vif_data[condition]
bars = ax.barh(
subset["Variable"], subset["VIF"], color=color, label=color
)
for bar in bars:
width = bar.get_width()
ax.annotate(
    f"{width:.1f}",
    xy=(width, bar.get_y() + bar.get_height() / 2),
    xytext=(5, 0),
    textcoords="offset points",
    va="center",
)
ax.set_xlabel("VIF Values")
st.pyplot(fig)
with st.expander("Results Summary Test data"):
st.header("Actual vs. Predicted Plot (Test)")
metrics_table, line, actual_vs_predicted_plot = (
plot_actual_vs_predicted(
X_test[date_col],
y_test,
test_pred,
model,
target_column=sel_target_col,
is_panel=is_panel,
)
)
st.plotly_chart(actual_vs_predicted_plot, use_container_width=True)
st.markdown("## Residual Analysis (Test)")
columns = st.columns(2)
with columns[0]:
fig = plot_residual_predicted(y_test, test_pred, X_test)
st.plotly_chart(fig)
with columns[1]:
st.empty()
fig = qqplot(y_test, test_pred)
st.plotly_chart(fig)
with columns[0]:
fig = residual_distribution(y_test, test_pred)
st.pyplot(fig)
mod_name = st.text_input("Enter model name", key="Enter_model_name")
cols = st.columns(2)
with cols[0]:
save_button_model = st.button(
    "Save this model to tune",
    key="build_rc_cb",
    use_container_width=True,
    on_click=save_model,
    args=(
        mod_name,
        model,
        X_test,
        X_train_orig,
        y_test,
        y_train,
        test_spends,
        spends_data,
    ),
)
st.markdown(
"Save this model to proceed to model tuning. Results of model building will also be saved."
)
with cols[1]:
save_session_button = st.button(
"Save Session",
key="save_session_bttn",
use_container_width=True,
on_click=save_session,
args=(X_train_orig, test_spends, spends_data),
)
st.markdown(
    "Save the results of model building without saving a model. You can come back to this page and save a model later."
)
else:
st.session_state["project_dct"]["model_build"]["show_results_check"] = False
# with cols1[0]:
# st.markdown(f"**Welcome {st.session_state['username']}**")
# with cols1[1]:
# st.markdown(f"**Current Project: {st.session_state['project_name']}**")
# st.title("Build Your Model")
# if not os.path.exists(
# os.path.join(st.session_state["project_path"], "data_import.pkl")
# ):
# st.error("Please move to Data Import Page and save.")
# st.stop()
# with open(os.path.join(st.session_state["project_path"], "data_import.pkl"), "rb") as f:
# data = pickle.load(f)
# st.session_state["bin_dict"] = data["bin_dict"]
# if not os.path.exists(
# os.path.join(st.session_state["project_path"], "final_df_transformed.pkl")
# ):
# st.error("Please move to Transformation Page and save transformations.")
# st.stop()
# with open(
# os.path.join(st.session_state["project_path"], "final_df_transformed.pkl"),
# "rb",
# ) as f:
# data = pickle.load(f)
# media_data = data["final_df_transformed"]
# # Sprint4 - available response metrics is a list of all reponse metrics in the data
# ## these will be put in a drop down
# st.session_state["media_data"] = media_data
# if "available_response_metrics" not in st.session_state:
# # st.session_state['available_response_metrics'] = ['Total Approved Accounts - Revenue',
# # 'Total Approved Accounts - Appsflyer',
# # 'Account Requests - Appsflyer',
# # 'App Installs - Appsflyer']
# st.session_state["available_response_metrics"] = st.session_state["bin_dict"][
# "Response Metrics"
# ]
# # Sprint4
# if "is_tuned_model" not in st.session_state:
# st.session_state["is_tuned_model"] = {}
# for resp_metric in st.session_state["available_response_metrics"]:
# resp_metric = (
# resp_metric.lower()
# .replace(" ", "_")
# .replace("-", "")
# .replace(":", "")
# .replace("__", "_")
# )
# st.session_state["is_tuned_model"][resp_metric] = False
# # Sprint4 - used_response_metrics is a list of resp metrics for which user has created & saved a model
# if "used_response_metrics" not in st.session_state:
# st.session_state["used_response_metrics"] = []
# # Sprint4 - saved_model_names
# if "saved_model_names" not in st.session_state:
# st.session_state["saved_model_names"] = []
# if "Model" not in st.session_state:
# if (
# "session_state_saved" in st.session_state["project_dct"]["model_build"].keys()
# and st.session_state["project_dct"]["model_build"]["session_state_saved"]
# is not None
# and "Model"
# in st.session_state["project_dct"]["model_build"]["session_state_saved"].keys()
# ):
# st.session_state["Model"] = st.session_state["project_dct"]["model_build"][
# "session_state_saved"
# ]["Model"]
# else:
# st.session_state["Model"] = {}
# date_col = "date"
# date = media_data[date_col]
# # Sprint4 - select a response metric
# default_target_idx = (
# st.session_state["project_dct"]["model_build"].get("sel_target_col", None)
# if st.session_state["project_dct"]["model_build"].get("sel_target_col", None)
# is not None
# else st.session_state["available_response_metrics"][0]
# )
# start_cols = st.columns(2)
# min_date = min(date)
# max_date = max(date)
# with start_cols[0]:
# sel_target_col = st.selectbox(
# "Select the response metric",
# st.session_state["available_response_metrics"],
# index=st.session_state["available_response_metrics"].index(default_target_idx),
# format_func=format_display,
# )
# # , on_change=reset_save())
# st.session_state["project_dct"]["model_build"]["sel_target_col"] = sel_target_col
# default_test_start = min_date + (3 * (max_date - min_date) / 4)
# with start_cols[1]:
# test_start = st.date_input(
# "Select test start date",
# default_test_start,
# min_value=min_date,
# max_value=max_date,
# )
# train_idx = media_data[media_data[date_col] <= pd.to_datetime(test_start)].index[-1]
# # st.write(train_idx, media_data.index[-1])
# target_col = (
# sel_target_col.lower()
# .replace(" ", "_")
# .replace("-", "")
# .replace(":", "")
# .replace("__", "_")
# )
# new_name_dct = {
# col: col.lower()
# .replace(".", "_")
# .lower()
# .replace("@", "_")
# .replace(" ", "_")
# .replace("-", "")
# .replace(":", "")
# .replace("__", "_")
# for col in media_data.columns
# }
# media_data.columns = [
# col.lower()
# .replace(".", "_")
# .replace("@", "_")
# .replace(" ", "_")
# .replace("-", "")
# .replace(":", "")
# .replace("__", "_")
# for col in media_data.columns
# ]
# panel_col=[]
# is_panel = True if len(panel_col) > 0 else False
# if "is_panel" not in st.session_state:
# st.session_state["is_panel"] = is_panel
# if is_panel:
# media_data.sort_values([date_col, panel_col], inplace=True)
# else:
# media_data.sort_values(date_col, inplace=True)
# media_data.reset_index(drop=True, inplace=True)
# st.session_state["date"] = date
# y = media_data[target_col]
# if is_panel:
# spends_data = media_data[
# [c for c in media_data.columns if "_cost" in c.lower() or "_spend" in c.lower()]
# + [date_col, panel_col]
# ]
# # Sprint3 - spends for resp curves
# else:
# spends_data = media_data[
# [c for c in media_data.columns if "_cost" in c.lower() or "_spend" in c.lower()]
# + [date_col]
# ]
# y = media_data[target_col]
# media_data.drop([date_col], axis=1, inplace=True)
# media_data.reset_index(drop=True, inplace=True)
# columns = st.columns(2)
# old_shape = media_data.shape
# if "old_shape" not in st.session_state:
# st.session_state["old_shape"] = old_shape
# if "media_data" not in st.session_state:
# st.session_state["media_data"] = pd.DataFrame()
# # Sprint3
# if "orig_media_data" not in st.session_state:
# st.session_state["orig_media_data"] = pd.DataFrame()
# # Sprint3 additions
# if "random_effects" not in st.session_state:
# st.session_state["random_effects"] = pd.DataFrame()
# if "pred_train" not in st.session_state:
# st.session_state["pred_train"] = []
# if "pred_test" not in st.session_state:
# st.session_state["pred_test"] = []
# # end of Sprint3 additions
# # Section 3 - Create combinations
# # bucket=['paid_search', 'kwai','indicacao','infleux', 'influencer','FB: Level Achieved - Tier 1 Impressions',
# # ' FB: Level Achieved - Tier 2 Impressions','paid_social_others',
# # ' GA App: Will And Cid Pequena Baixo Risco Clicks',
# # 'digital_tactic_others',"programmatic"
# # ]
# # srishti - bucket names changed
# bucket = [
# "paid_search",
# "kwai",
# "indicacao",
# "infleux",
# "influencer",
# "fb_level_achieved_tier_2",
# "fb_level_achieved_tier_1",
# "paid_social_others",
# "ga_app",
# "digital_tactic_others",
# "programmatic",
# ]
# bucket =[
# 'facebook',
# 'google_search',
# 'google_demand',
# 'youtube',
# 'google_pmax',
# 'bing'
# ] #manoj
# # with columns[0]:
# # if st.button('Create Combinations of Variables'):
# top_3_correlated_features = []
# # # for col in st.session_state['media_data'].columns[:19]:
# # original_cols = [c for c in st.session_state['media_data'].columns if
# # "_clicks" in c.lower() or "_impressions" in c.lower()]
# # original_cols = [c for c in original_cols if "_lag" not in c.lower() and "_adstock" not in c.lower()]
# original_cols = (
# st.session_state["bin_dict"]["Media"] #manoj + st.session_state["bin_dict"]["Internal"]
# )
# original_cols = [
# col.lower()
# .replace(".", "_")
# .replace("@", "_")
# .replace(" ", "_")
# .replace("-", "")
# .replace(":", "")
# .replace("__", "_")
# for col in original_cols
# ]
# original_cols = [col for col in original_cols if "_cost" not in col]
# # for col in st.session_state['media_data'].columns[:19]:
# for col in original_cols: # srishti - new
# corr_df = (
# pd.concat([st.session_state["media_data"].filter(regex=col), y], axis=1)
# .corr()[target_col]
# .iloc[:-1]
# )
# top_3_correlated_features.append(
# list(corr_df.sort_values(ascending=False).head(2).index)
# )
# flattened_list = [item for sublist in top_3_correlated_features for item in sublist]
# # all_features_set={var:[col for col in flattened_list if var in col] for var in bucket}
# all_features_set = {
# var: [col for col in flattened_list if var in col]
# for var in bucket
# if len([col for col in flattened_list if var in col]) > 0
# } # srishti
# channels_all = [values for values in all_features_set.values()]
# st.session_state["combinations"] = list(itertools.product(*channels_all))
# # if 'combinations' not in st.session_state:
# # st.session_state['combinations']=combinations_all
# st.session_state["final_selection"] = st.session_state["combinations"]
# # st.success('Created combinations')
# # revenue.reset_index(drop=True,inplace=True)
# y.reset_index(drop=True, inplace=True)
# if "Model_results" not in st.session_state:
# st.session_state["Model_results"] = {
# "Model_object": [],
# "Model_iteration": [],
# "Feature_set": [],
# "MAPE": [],
# "R2": [],
# "ADJR2": [],
# "pos_count": [],
# "coefficients": [],
# "Total Positive Contributions": [],
# "Significance": [],
# }
# def reset_model_result_dct():
# st.session_state["Model_results"] = {
# "Model_object": [],
# "Model_iteration": [],
# "Feature_set": [],
# "MAPE": [],
# "R2": [],
# "ADJR2": [],
# "pos_count": [],
# "coefficients": [],
# "Total Positive Contributions": [],
# "Significance": [],
# }
# if "filter_df_base" in st.session_state:
# del st.session_state.filter_df_base
# if "iterations" not in st.session_state:
# st.session_state["iterations"] = 0
# if "final_selection" not in st.session_state:
# st.session_state["final_selection"] = False
# save_path = r"Model/"
# if st.session_state["final_selection"]:
# st.write(
# f'Total combinations created {format_numbers(len(st.session_state["final_selection"]))}'
# )
# # st.session_state["project_dct"]["model_build"]["all_iters_check"] = False
# checkbox_default = (
# st.session_state["project_dct"]["model_build"]["all_iters_check"]
# if st.session_state["project_dct"]["model_build"]["all_iters_check"] is not None
# else False
# )
# end_date = test_start - timedelta(days=1)
# disp_str = (
# "Data Split -- Training Period: "
# + min_date.strftime("%B %d, %Y")
# + " - "
# + end_date.strftime("%B %d, %Y")
# + ", Testing Period: "
# + test_start.strftime("%B %d, %Y")
# + " - "
# + max_date.strftime("%B %d, %Y")
# )
# st.markdown(disp_str)
# if st.checkbox("Build all iterations", value=checkbox_default):
# # st.session_state["project_dct"]["model_build"]["all_iters_check"]
# iterations = len(st.session_state["final_selection"])
# st.session_state["project_dct"]["model_build"]["all_iters_check"] = True
# else:
# iterations = st.number_input(
# "Select the number of iterations to perform",
# min_value=0,
# step=100,
# value=st.session_state["iterations"],
# on_change=reset_model_result_dct,
# )
# st.session_state["project_dct"]["model_build"]["all_iters_check"] = False
# st.session_state["project_dct"]["model_build"]["iterations"] = iterations
# # st.stop()
# # build_button = st.session_state["project_dct"]["model_build"]["build_button"] if \
# # "build_button" in st.session_state["project_dct"]["model_build"].keys() else False
# # model_button =st.button('Build Model', on_click=reset_model_result_dct, key='model_build_button')
# # if
# # if model_button:
# if st.button(
# "Build Model",
# on_click=reset_model_result_dct,
# key="model_build_button",
# ):
# if iterations < 1:
# st.error("Please select number of iterations")
# st.stop()
# st.session_state["project_dct"]["model_build"]["build_button"] = True
# st.session_state["iterations"] = iterations
# # Section 4 - Model
# # st.session_state['media_data'] = st.session_state['media_data'].fillna(method='ffill')
# st.session_state["media_data"] = st.session_state["media_data"].ffill()
# progress_bar = st.progress(0) # Initialize the progress bar
# # time_remaining_text = st.empty() # Create an empty space for time remaining text
# start_time = time.time() # Record the start time
# progress_text = st.empty()
# # time_elapsed_text = st.empty()
# # for i, selected_features in enumerate(st.session_state["final_selection"][40000:40000 + int(iterations)]):
# # for i, selected_features in enumerate(st.session_state["final_selection"]):
# if is_panel == True:
# for i, selected_features in enumerate(
# st.session_state["final_selection"][0 : int(iterations)]
# ): # srishti
# df = st.session_state["media_data"]
# fet = [var for var in selected_features if len(var) > 0]
# inp_vars_str = " + ".join(fet) # new
# X = df[fet]
# y = df[target_col]
# ss = MaxAbsScaler()
# X = pd.DataFrame(ss.fit_transform(X), columns=X.columns)
# X[target_col] = y # Sprint2
# X[panel_col] = df[panel_col] # Sprint2
# X_train = X.iloc[:train_idx]
# X_test = X.iloc[train_idx:]
# y_train = y.iloc[:train_idx]
# y_test = y.iloc[train_idx:]
# print(X_train.shape)
# # model = sm.OLS(y_train, X_train).fit()
# md_str = target_col + " ~ " + inp_vars_str
# # md = smf.mixedlm("total_approved_accounts_revenue ~ {}".format(inp_vars_str),
# # data=X_train[[target_col] + fet],
# # groups=X_train[panel_col])
# md = smf.mixedlm(
# md_str,
# data=X_train[[target_col] + fet],
# groups=X_train[panel_col],
# )
# mdf = md.fit()
# predicted_values = mdf.fittedvalues
# coefficients = mdf.fe_params.to_dict()
# model_positive = [
# col for col in coefficients.keys() if coefficients[col] > 0
# ]
# pvalues = [var for var in list(mdf.pvalues) if var <= 0.06]
# if (len(model_positive) / len(selected_features)) > 0 and (
# len(pvalues) / len(selected_features)
# ) >= 0: # srishti - changed just for testing, revert later
# # predicted_values = model.predict(X_train)
# mape = mean_absolute_percentage_error(y_train, predicted_values)
# r2 = r2_score(y_train, predicted_values)
# adjr2 = 1 - (1 - r2) * (len(y_train) - 1) / (
# len(y_train) - len(selected_features) - 1
# )
# filename = os.path.join(save_path, f"model_{i}.pkl")
# with open(filename, "wb") as f:
# pickle.dump(mdf, f)
# # with open(r"C:\Users\ManojP\Documents\MMM\simopt\Model\model.pkl", 'rb') as file:
# # model = pickle.load(file)
# st.session_state["Model_results"]["Model_object"].append(filename)
# st.session_state["Model_results"]["Model_iteration"].append(i)
# st.session_state["Model_results"]["Feature_set"].append(fet)
# st.session_state["Model_results"]["MAPE"].append(mape)
# st.session_state["Model_results"]["R2"].append(r2)
# st.session_state["Model_results"]["pos_count"].append(
# len(model_positive)
# )
# st.session_state["Model_results"][
# "Total Positive Contributions"
# ].append(
# f"{len(model_positive)} / {len(selected_features) + 1}"
# ) # Base Sales / Intercept added with +1
# st.session_state["Model_results"]["Significance"].append(
# f"{len(pvalues)} / {len(selected_features) + 1}"
# )
# st.session_state["Model_results"]["ADJR2"].append(adjr2)
# st.session_state["Model_results"]["coefficients"].append(coefficients)
# current_time = time.time()
# time_taken = current_time - start_time
# time_elapsed_minutes = time_taken / 60
# completed_iterations_text = f"{i + 1}/{iterations}"
# progress_bar.progress((i + 1) / int(iterations))
# progress_text.text(
# f"Completed iterations: {completed_iterations_text}, Time Elapsed (min): {time_elapsed_minutes:.2f}"
# )
# st.write(
# f'Out of {st.session_state["iterations"]} iterations: {len(st.session_state["Model_results"]["Model_object"])} valid models'
# )
# else:
# for i, selected_features in enumerate(
# st.session_state["final_selection"][0 : int(iterations)]
# ): # srishti
# df = st.session_state["media_data"]
# fet = [var for var in selected_features if len(var) > 0]
# inp_vars_str = " + ".join(fet)
# X = df[fet]
# y = df[target_col]
# ss = MaxAbsScaler()
# X = pd.DataFrame(ss.fit_transform(X), columns=X.columns)
# X = sm.add_constant(X)
# X_train = X.iloc[:130]
# X_test = X.iloc[130:]
# y_train = y.iloc[:130]
# y_test = y.iloc[130:]
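# # NOTE: the 130-row train/test split is hard-coded in this branch;
# # given the DEFAULT_TRAIN_RATIO constant imported above, it presumably
# # should be derived as int(DEFAULT_TRAIN_RATIO * len(X)) instead.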
# model = sm.OLS(y_train, X_train).fit()
# coefficients = model.params.to_list()
# model_positive = [coef for coef in coefficients if coef > 0]
# predicted_values = model.predict(X_train)
# pvalues = [var for var in list(model.pvalues) if var <= 0.06]
# # if (len(model_positive) / len(selected_features)) > 0.9 and (len(pvalues) / len(selected_features)) >= 0.8:
# if (len(model_positive) / len(selected_features)) > 0 and (
# len(pvalues) / len(selected_features)
# ) >= 0.5: # VALID MODEL CRITERIA -- loosened for testing; revert via MODEL_POS_COEFF_RATIO_THRESHOLD / MODEL_P_VALUE_RATIO_THRESHOLD
# # predicted_values = model.predict(X_train)
# mape = mean_absolute_percentage_error(y_train, predicted_values)
# adjr2 = model.rsquared_adj
# r2 = model.rsquared
# filename = os.path.join(save_path, f"model_{i}.pkl")
# with open(filename, "wb") as f:
# pickle.dump(model, f)
# # with open(r"C:\Users\ManojP\Documents\MMM\simopt\Model\model.pkl", 'rb') as file:
# # model = pickle.load(file)
# st.session_state["Model_results"]["Model_object"].append(filename)
# st.session_state["Model_results"]["Model_iteration"].append(i)
# st.session_state["Model_results"]["Feature_set"].append(fet)
# st.session_state["Model_results"]["MAPE"].append(mape)
# st.session_state["Model_results"]["R2"].append(r2)
# st.session_state["Model_results"]["ADJR2"].append(adjr2)
# st.session_state["Model_results"][
# "Total Positive Contributions"
# ].append(f"{len(model_positive)} / {len(selected_features) + 1}")
# st.session_state["Model_results"]["Significance"].append(
# f"{len(pvalues)} / {len(selected_features) + 1}"
# )
# st.session_state["Model_results"]["pos_count"].append(
# len(model_positive)
# )
# st.session_state["Model_results"]["coefficients"].append(coefficients)
# current_time = time.time()
# time_taken = current_time - start_time
# time_elapsed_minutes = time_taken / 60
# completed_iterations_text = f"{i + 1}/{iterations}"
# progress_bar.progress((i + 1) / int(iterations))
# progress_text.text(
# f"Completed iterations: {completed_iterations_text}, Time Elapsed (min): {time_elapsed_minutes:.2f}"
# )
# st.write(
# f'Out of {st.session_state["iterations"]} iterations: {len(st.session_state["Model_results"]["Model_object"])} valid models'
# )
# # pd.DataFrame(st.session_state["Model_results"]).to_csv(
# # "model_output.csv"
# # )
# def to_percentage(value):
# return f"{value * 100:.1f}%"
# ## Section 5 - Select Model
# st.title("Select Models")
# if len(st.session_state["Model_results"]["Model_object"]) == 0:
# st.warning("Click 'Build Model' to generate model results", icon="⚠️")
# show_results_default = (
# st.session_state["project_dct"]["model_build"]["show_results_check"]
# if st.session_state["project_dct"]["model_build"]["show_results_check"] is not None
# else False
# )
# if "tick" not in st.session_state:
# st.session_state["tick"] = False
# # if st.checkbox(
# # "Show results of top 10 models (based on MAPE and Adj. R2)",
# # ):
# if len(st.session_state["Model_results"]["Model_object"]) > 0:
# st.session_state["project_dct"]["model_build"]["show_results_check"] = True
# st.session_state["tick"] = True
# st.write("Select one model iteration to generate performance metrics for it:")
# data = pd.DataFrame(st.session_state["Model_results"])
# data = data[data["pos_count"] == data["pos_count"].max()].reset_index(
# drop=True
# ) # Sprint4 -- Srishti -- keep only models with the highest count of positive (i.e. fewest negative) coefficients
# data.sort_values(by=["ADJR2"], ascending=False, inplace=True)
# data.drop_duplicates(subset="Model_iteration", inplace=True)
# # Display the purpose of the filter section
# st.write(
# """
# ### Filter Results
# Use the filters below to refine the displayed model results. Filtering out models that fail the required business criteria ensures only the most relevant candidates are considered for further analysis. If multiple models pass, select the first one; it is the best-ranked under the evaluation criteria.
# """
# )
# # st.write(st.session_state["Model_results"])
# data = pd.DataFrame(data)
# coefficients_df, data_df = prepare_data_df(data)
# if "base_value" not in st.session_state:
# st.session_state.base_value = True
# if "filter_df_base" not in st.session_state:
# # Define the structure of the empty DataFrame
# filter_df_data = {
# "Channel Name": pd.Series([], dtype="str"),
# "Filter Condition": pd.Series([], dtype="str"),
# "Value": pd.Series([], dtype="str"),
# }
# st.session_state.filter_df_base = pd.DataFrame(filter_df_data)
# st.session_state.base_value = not st.session_state.base_value
# if "filter_df_editable_change" not in st.session_state:
# st.session_state.filter_df_editable_change = False
# def filter_df_editable_change():
# st.session_state.filter_df_editable_change = True
# filter_df_editable = st.data_editor(
# st.session_state.filter_df_base,
# column_config={
# "Channel Name": st.column_config.SelectboxColumn(
# options=list(coefficients_df.columns),
# required=True,
# default="Base Sales",
# ),
# "Filter Condition": st.column_config.SelectboxColumn(
# options=[
# "<",
# ">",
# "=",
# "<=",
# ">=",
# ],
# required=True,
# default=">",
# ),
# "Value": st.column_config.NumberColumn(
# required=True, default=0
# ),
# },
# hide_index=True,
# use_container_width=True,
# num_rows="dynamic",
# on_change=filter_df_editable_change,
# key=f"filter_df_editable_{st.session_state.base_value}",
# )
# if st.session_state.filter_df_editable_change:
# st.session_state.filter_df_base = filter_df_editable
# st.session_state.filter_df_editable_change = False
# st.rerun()
# # Apply filters from filter_df_editable to data_df
# if "filtered_df" not in st.session_state:
# st.session_state["filtered_df"] = data_df.copy()
# # if st.button("Filter", args=(data_df)):
# st.session_state["filtered_df"] = data_df.copy()
# for index, row in filter_df_editable.iterrows():
# channel_name = row["Channel Name"]
# condition = row["Filter Condition"]
# value = row["Value"]
# if channel_name in st.session_state["filtered_df"].columns:
# # Construct the query string; map "=" to "==" since pandas query syntax requires the double equals
# query_string = f"`{channel_name}` {'==' if condition == '=' else condition} {value}"
# st.session_state["filtered_df"] = st.session_state["filtered_df"].query(
# query_string
# )
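# # NOTE: backticks in DataFrame.query allow column names containing
# # spaces (e.g. "`Base Sales` > 0"); each filter row ANDs one more
# # condition onto the progressively narrowed frame.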
# # After filtering, check if the DataFrame is empty
# if st.session_state["filtered_df"].empty:
# # Display a warning message if no rows meet the filter criteria
# st.warning("No model meets the specified filter conditions", icon="⚠️")
# st.stop() # Optionally stop further execution
# # Output the filtered data
# st.write("Select one model iteration to generate performance metrics for it:")
# # Dataframe
# display_df = st.session_state.filtered_df.rename(columns={"Rank": "Model Number"})
# st.dataframe(display_df, hide_index=True)
# min_rank = min(st.session_state["filtered_df"]["Rank"])
# max_rank = max(st.session_state["filtered_df"]["Rank"])
# available_ranks = st.session_state["filtered_df"]["Rank"].unique()
# # Get row number input from the user
# rank_number = st.number_input(
# "Select model by Model Number:",
# min_value=min_rank,
# max_value=max_rank,
# value=min_rank,
# step=1,
# )
# # Get row
# if rank_number not in available_ranks:
# st.warning("No model is available with the selected Model Number", icon="⚠️")
# st.stop()
# # Find the row that matches the selected rank
# selected_rows = st.session_state["filtered_df"][
# st.session_state["filtered_df"]["Rank"] == rank_number
# ]
# selected_rows = [
# (selected_rows.to_dict(orient="records")[0] if not selected_rows.empty else {})
# ]
# # if st.session_state["selected_rows"] != selected_rows:
# # st.session_state["build_rc_cb"] = False
# st.session_state["selected_rows"] = selected_rows
# if "Model" not in st.session_state:
# st.session_state["Model"] = {}
# # # Section 6 - Display Results
# top_10 = data.head(10)
# top_10["Rank"] = np.arange(1, len(top_10) + 1, 1)
# top_10[["MAPE", "R2", "ADJR2"]] = np.round(
# top_10[["MAPE", "R2", "ADJR2"]], 4
# ).applymap(to_percentage)
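# # NOTE: DataFrame.applymap is deprecated as of pandas 2.1 in favor of
# # DataFrame.map; .map(to_percentage) is the forward-compatible call if
# # pandas is upgraded.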
# top_10_table = top_10[["Rank", "Model_iteration", "MAPE", "ADJR2", "R2"]]
# # top_10_table.columns=[['Rank','Model Iteration Index','MAPE','Adjusted R2','R2']]
# # gd = GridOptionsBuilder.from_dataframe(top_10_table)
# # gd.configure_pagination(enabled=True)
# # gd.configure_selection(
# # use_checkbox=True,
# # selection_mode="single",
# # pre_select_all_rows=False,
# # pre_selected_rows=[1],
# # )
# # gridoptions = gd.build()
# # table = AgGrid(
# # top_10,
# # gridOptions=gridoptions,
# # update_mode=GridUpdateMode.SELECTION_CHANGED,
# # )
# # selected_rows = table.selected_rows
# # Section 6 - Display Results
# # st.write(selected_rows[0])
# if len(selected_rows) > 0:
# st.header("Results Summary")
# model_object = data[
# data["Model_iteration"] == selected_rows[0]["Model Iteration"]
# ]["Model_object"]
# features_set = data[
# data["Model_iteration"] == selected_rows[0]["Model Iteration"]
# ]["Feature_set"]
# with open(str(model_object.values[0]), "rb") as file:
# # print(file)
# model = pickle.load(file)
# st.write(model.summary())
# st.header("Actual vs. Predicted Plot")
# if is_panel:
# df = st.session_state["media_data"]
# X = df[features_set.values[0]]
# y = df[target_col]
# ss = MaxAbsScaler()
# X = pd.DataFrame(ss.fit_transform(X), columns=X.columns)
# # Sprint2 changes
# X[target_col] = y # new
# X[panel_col] = df[panel_col]
# X[date_col] = date
# X_train = X.iloc[:train_idx]
# X_test = X.iloc[train_idx:].reset_index(drop=True)
# y_train = y.iloc[:train_idx]
# y_test = y.iloc[train_idx:].reset_index(drop=True)
# test_spends = spends_data[
# train_idx:
# ] # Sprint3 - test spends for resp curves
# random_eff_df = get_random_effects(media_data, panel_col, model)
# train_pred = model.fittedvalues
# test_pred = mdf_predict(X_test, model, random_eff_df)
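# # NOTE (hedged): get_random_effects / mdf_predict are helpers from
# # data_prep whose signatures are not shown here. For a statsmodels
# # MixedLM with a per-panel random intercept, out-of-sample prediction
# # is typically fixed-effects prediction plus the group's intercept:
# # fe_pred = model.predict(exog=X_test[features])
# # re_map = {g: re.iloc[0] for g, re in model.random_effects.items()}
# # test_pred = fe_pred + X_test[panel_col].map(re_map).fillna(0)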
# print("__" * 20, test_pred.isna().sum())
# else:
# df = st.session_state["media_data"]
# X = df[features_set.values[0]]
# y = df[target_col]
# ss = MaxAbsScaler()
# X = pd.DataFrame(ss.fit_transform(X), columns=X.columns)
# X = sm.add_constant(X)
# X[date_col] = date
# X_train = X.iloc[:130]
# X_test = X.iloc[130:].reset_index(drop=True)
# y_train = y.iloc[:130]
# y_test = y.iloc[130:].reset_index(drop=True)
# test_spends = spends_data[130:] # Sprint3 - test spends for resp curves
# train_pred = model.predict(X_train[features_set.values[0] + ["const"]])
# test_pred = model.predict(X_test[features_set.values[0] + ["const"]])
# # save x test to test - srishti
# # x_test_to_save = X_test.copy()
# # x_test_to_save['Actuals'] = y_test
# # x_test_to_save['Predictions'] = test_pred
# #
# # x_train_to_save = X_train.copy()
# # x_train_to_save['Actuals'] = y_train
# # x_train_to_save['Predictions'] = train_pred
# #
# # x_train_to_save.to_csv('Test/x_train_to_save.csv', index=False)
# # x_test_to_save.to_csv('Test/x_test_to_save.csv', index=False)
# st.session_state["X"] = X_train
# st.session_state["features_set"] = features_set.values[0]
# print("**" * 20, "selected model features : ", features_set.values[0])
# metrics_table, line, actual_vs_predicted_plot = plot_actual_vs_predicted(
# X_train[date_col],
# y_train,
# train_pred,
# model,
# target_column=sel_target_col,
# is_panel=is_panel,
# ) # Sprint2
# st.plotly_chart(actual_vs_predicted_plot, use_container_width=True)
# st.markdown("## Residual Analysis")
# columns = st.columns(2)
# with columns[0]:
# fig = plot_residual_predicted(y_train, train_pred, X_train) # Sprint2
# st.plotly_chart(fig)
# with columns[1]:
# st.empty()
# fig = qqplot(y_train, train_pred) # Sprint2
# st.plotly_chart(fig)
# with columns[0]:
# fig = residual_distribution(y_train, train_pred) # Sprint2
# st.pyplot(fig)
# vif_data = pd.DataFrame()
# # X=X.drop('const',axis=1)
# X_train_orig = (
# X_train.copy()
# ) # Sprint2 -- creating a copy of xtrain. Later deleting panel, target & date from xtrain
# del_col_list = list(
# set([target_col, panel_col, date_col]).intersection(set(X_train.columns))
# )
# X_train.drop(columns=del_col_list, inplace=True) # Sprint2
# vif_data["Variable"] = X_train.columns
# vif_data["VIF"] = [
# variance_inflation_factor(X_train.values, i) for i in range(X_train.shape[1])
# ]
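# # VIF for predictor i is 1 / (1 - R_i^2), where R_i^2 comes from
# # regressing predictor i on the remaining predictors. Rules of thumb:
# # < 3 low, 3-10 moderate, > 10 severe multicollinearity; the hard-coded
# # 3 / 10 below likely mirror the imported VIF_LOW_THRESHOLD /
# # VIF_HIGH_THRESHOLD constants.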
# vif_data.sort_values(by=["VIF"], ascending=False, inplace=True)
# vif_data = np.round(vif_data)
# vif_data["VIF"] = vif_data["VIF"].astype(float)
# st.header("Variance Inflation Factor (VIF)")
# # st.dataframe(vif_data)
# color_mapping = {
# "darkgreen": (vif_data["VIF"] < 3),
# "orange": (vif_data["VIF"] >= 3) & (vif_data["VIF"] <= 10),
# "darkred": (vif_data["VIF"] > 10),
# }
# # Create a horizontal bar plot
# fig, ax = plt.subplots()
# fig.set_figwidth(10) # Adjust the width of the figure as needed
# # Sort the bars by descending VIF values
# vif_data = vif_data.sort_values(by="VIF", ascending=False)
# # Iterate through the color mapping and plot bars with corresponding colors
# for color, condition in color_mapping.items():
# subset = vif_data[condition]
# bars = ax.barh(subset["Variable"], subset["VIF"], color=color, label=color)
# # Add text annotations on top of the bars
# for bar in bars:
# width = bar.get_width()
# ax.annotate(
# f"{width:}",
# xy=(width, bar.get_y() + bar.get_height() / 2),
# xytext=(5, 0),
# textcoords="offset points",
# va="center",
# )
# # Customize the plot
# ax.set_xlabel("VIF Values")
# # ax.set_title('Variance Inflation Factor (VIF)')
# # ax.legend(loc='upper right')
# # Display the plot in Streamlit
# st.pyplot(fig)
# with st.expander("Results Summary Test data"):
# # ss = MaxAbsScaler()
# # X_test = pd.DataFrame(ss.fit_transform(X_test), columns=X_test.columns)
# st.header("Actual vs. Predicted Plot")
# metrics_table, line, actual_vs_predicted_plot = plot_actual_vs_predicted(
# X_test[date_col],
# y_test,
# test_pred,
# model,
# target_column=sel_target_col,
# is_panel=is_panel,
# ) # Sprint2
# st.plotly_chart(actual_vs_predicted_plot, use_container_width=True)
# st.markdown("## Residual Analysis")
# columns = st.columns(2)
# with columns[0]:
# fig = plot_residual_predicted(y_test, test_pred, X_test) # Sprint2 -- y_test (not full y) must pair with test_pred
# st.plotly_chart(fig)
# with columns[1]:
# st.empty()
# fig = qqplot(y_test, test_pred) # Sprint2
# st.plotly_chart(fig)
# with columns[0]:
# fig = residual_distribution(y_test, test_pred) # Sprint2
# st.pyplot(fig)
# value = False
# save_button_model = st.checkbox(
# "Save this model to tune", key="build_rc_cb"
# ) # , on_click=set_save())
# if save_button_model:
# mod_name = st.text_input("Enter model name")
# if len(mod_name) > 0:
# mod_name = (
# mod_name + "__" + target_col
# ) # Sprint4 - adding target col to model name
# if is_panel:
# pred_train = model.fittedvalues
# pred_test = mdf_predict(X_test, model, random_eff_df)
# else:
# st.session_state["features_set"] = st.session_state["features_set"] + [
# "const"
# ]
# pred_train = model.predict(
# X_train_orig[st.session_state["features_set"]]
# )
# pred_test = model.predict(X_test[st.session_state["features_set"]])
# st.session_state["Model"][mod_name] = {
# "Model_object": model,
# "feature_set": st.session_state["features_set"],
# "X_train": X_train_orig,
# "X_test": X_test,
# "y_train": y_train,
# "y_test": y_test,
# "pred_train": pred_train,
# "pred_test": pred_test,
# }
# st.session_state["X_train"] = X_train_orig
# st.session_state["X_test_spends"] = test_spends
# st.session_state["saved_model_names"].append(mod_name)
# # Sprint3 additions
# if is_panel:
# random_eff_df = get_random_effects(media_data, panel_col, model)
# st.session_state["random_effects"] = random_eff_df
# with open(
# os.path.join(st.session_state["project_path"], "best_models.pkl"),
# "wb",
# ) as f:
# pickle.dump(st.session_state["Model"], f)
# st.success(
# mod_name
# + " model saved! Proceed to the next page to tune the model"
# )
# urm = st.session_state["used_response_metrics"]
# urm.append(sel_target_col)
# st.session_state["used_response_metrics"] = list(set(urm))
# mod_name = ""
# # Sprint4 - add the formatted name of the target col to used resp metrics
# value = False
# st.session_state["project_dct"]["model_build"]["session_state_saved"] = {}
# for key in [
# "Model",
# "bin_dict",
# "used_response_metrics",
# "date",
# "saved_model_names",
# "media_data",
# "X_test_spends",
# ]:
# st.session_state["project_dct"]["model_build"]["session_state_saved"][
# key
# ] = st.session_state[key]
# project_dct_path = os.path.join(
# st.session_state["project_path"], "project_dct.pkl"
# )
# with open(project_dct_path, "wb") as f:
# pickle.dump(st.session_state["project_dct"], f)
# update_db("4_Model_Build.py")
# st.toast("💾 Saved Successfully!")
# else:
# st.session_state["project_dct"]["model_build"]["show_results_check"] = False
except:
# Capture the error details
exc_type, exc_value, exc_traceback = sys.exc_info()
error_message = "".join(
traceback.format_exception(exc_type, exc_value, exc_traceback)
)
log_message(
"error",
f"Error occurred: {error_message}",
"Model Build",
)
st.warning("An error has occurred, please try again", icon="⚠️")
# st.write(error_message)