# Space54 / app.py — QuantumLearner (commit 3550b3f); Hugging Face Space page
# header converted to a comment so the file parses as Python.
# --- Third-party and standard-library imports ---
import streamlit as st
import requests
import zipfile
import io
import re
import pandas as pd
import yfinance as yf
import warnings
warnings.filterwarnings('ignore')  # silence library warnings (pandas/statsmodels deprecations) in the UI
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.ar_model import AutoReg
#import matplotlib.pyplot as plt
#import matplotlib.dates as mdates
# ---------------------------------------------------------
# Streamlit App
# ---------------------------------------------------------
st.set_page_config(layout="wide")  # use the full browser width for the wide plots below
# ============== SIDEBAR ==============
with st.sidebar.expander("Input Parameters", expanded=True):
    portfolio_input = st.text_area(
        "Enter ticker-weight pairs",
        value="MSFT,0.6\nAAPL,0.4",
        help="Each line should be Ticker,Weight (e.g. 'MSFT,0.6' and 'AAPL,0.4')"
    )
    # Parse "TICKER,weight" lines into (ticker, weight) tuples.
    # FIX: malformed lines (wrong field count, non-numeric weight) previously
    # raised an unhandled ValueError and crashed the app; now they are skipped
    # with a visible warning.
    portfolio = []
    for line in portfolio_input.strip().split("\n"):
        parts = line.split(",")
        if len(parts) != 2:
            if line.strip():
                st.warning(f"Skipping malformed line: '{line}' (expected 'Ticker,Weight')")
            continue
        t = parts[0].strip().upper()
        try:
            w = float(parts[1].strip())
        except ValueError:
            st.warning(f"Skipping line with non-numeric weight: '{line}'")
            continue
        portfolio.append((t, w))
    rolling_window = st.number_input(
        "Rolling Window (Days)",
        min_value=30,
        max_value=2520,
        value=252,
        step=30,
        help="Window size for rolling regression analysis. This shows how factor loadings change over time."
    )
# All six factors are always used; kept as a separate name for future UI filtering.
factor_list = ["MKT_RF", "SMB", "HML", "RMW", "CMA", "MOM"]
selected_factors = factor_list
run_analysis = st.sidebar.button("Run Analysis", help="Click to run the full analysis.")
# ============== MAIN APP ==============
st.title("Fama-French Factor Model")
st.markdown("### Factor Exposure & Mispricing Detector")
# Introductory blurb shown above the analysis sections.
st.write(
    "This tool applies the **Fama-French 6-Factor Model** to detect stock mispricing and analyze risk exposure. It evaluates factor influences, tests for mean reversion, and identifies pricing inefficiencies. The model works best for portfolios or ETFs. At the ticker level, results are less reliable but still reveal stock characteristics. Analyses rely on historical data and statistical models. External shocks and unmodeled factors can affect performance beyond what this model captures."
)
with st.expander("Factor Definitions & Construction", expanded=False):
    st.write(
        "The **Fama-French 6-Factor Model** extends the original 3-Factor Model by incorporating profitability and investment factors, along with momentum. "
        "Each factor captures different characteristics that influence stock returns. Below is an overview of these factors, their construction, and their implications."
    )
    # One (header, definition, construction, implication) entry per factor;
    # rendered by a single loop below instead of repeated inline calls.
    _factor_docs = [
        ("Market Factor (MKT_RF)",
         "**Definition:** Measures the excess return of the overall market relative to the risk-free rate.",
         "**Construction:** Calculated as the return of a broad market portfolio (e.g., S&P 500) minus the risk-free rate.",
         "**Implication:** A higher market beta means the stock moves more aggressively than the market."),
        ("Size Factor (SMB - Small Minus Big)",
         "**Definition:** Captures the historical tendency of small-cap stocks to outperform large-cap stocks.",
         "**Construction:** The return spread between a portfolio of small-cap stocks and a portfolio of large-cap stocks.",
         "**Implication:** A positive SMB beta means the stock behaves more like a small-cap company."),
        ("Value Factor (HML - High Minus Low)",
         "**Definition:** Reflects the outperformance of value stocks (high book-to-market) over growth stocks (low book-to-market).",
         "**Construction:** The return spread between a portfolio of high book-to-market stocks and a portfolio of low book-to-market stocks.",
         "**Implication:** A negative HML beta suggests the stock behaves more like a growth stock."),
        ("Profitability Factor (RMW - Robust Minus Weak)",
         "**Definition:** Measures the effect of profitability on stock returns, where more profitable firms tend to outperform less profitable firms.",
         "**Construction:** The return spread between stocks with high operating profitability and those with low profitability.",
         "**Implication:** A positive RMW beta indicates the stock behaves like a highly profitable firm."),
        ("Investment Factor (CMA - Conservative Minus Aggressive)",
         "**Definition:** Reflects how investment policies affect stock returns, where firms with conservative investment strategies tend to outperform aggressive ones.",
         "**Construction:** The return spread between firms that invest conservatively (low asset growth) and firms that invest aggressively (high asset growth).",
         "**Implication:** A negative CMA beta means the company follows an aggressive investment strategy."),
        ("Momentum Factor (MOM)",
         "**Definition:** Captures the tendency of past winners to continue performing well and past losers to underperform.",
         "**Construction:** The return spread between stocks with strong past performance and stocks with weak past performance.",
         "**Implication:** A positive MOM beta suggests the stock follows momentum-driven trends."),
    ]
    for _header, _definition, _construction, _implication in _factor_docs:
        st.markdown(f"##### {_header}")
        st.write(_definition)
        st.write(_construction)
        st.write(_implication)
# Everything below only runs after the user clicks the sidebar "Run Analysis" button.
if run_analysis:
    # ==============================================
    # 1. Download & Merge Data
    # ==============================================
    st.header("1. Data Collection")
class FamaFrenchDownloader:
    """Downloads and parses the daily Fama-French 5-factor dataset (Ken French data library)."""

    FF5_URL = "https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/F-F_Research_Data_5_Factors_2x3_daily_CSV.zip"

    @staticmethod
    def download_ff5():
        """Return the raw CSV text lines contained inside the zipped factor file."""
        # FIX: add a timeout so the app cannot hang forever on a stalled request,
        # and surface HTTP errors instead of failing later on a corrupt zip body.
        response = requests.get(FamaFrenchDownloader.FF5_URL, timeout=30)
        response.raise_for_status()
        with zipfile.ZipFile(io.BytesIO(response.content)) as z:
            file_name = z.namelist()[0]
            with z.open(file_name) as f:
                return f.read().decode("utf-8").splitlines()

    @staticmethod
    def parse_ff5_data():
        """Parse the downloaded lines into a DataFrame: Date + five factors + RF (all in %)."""
        lines = FamaFrenchDownloader.download_ff5()
        # Data rows start with an 8-digit YYYYMMDD date; everything else is header/footer.
        data_lines = [line for line in lines if re.match(r'^\s*\d{8}', line)]
        df = pd.read_csv(io.StringIO("\n".join(data_lines)), sep=r"\s*,\s*", header=None, engine="python")
        df.columns = ["Date", "MKT_RF", "SMB", "HML", "RMW", "CMA", "RF"]
        df["Date"] = pd.to_datetime(df["Date"], format="%Y%m%d", errors="coerce")
        df.iloc[:, 1:] = df.iloc[:, 1:].apply(pd.to_numeric, errors="coerce")
        # FIX: drop rows whose date failed to parse so downstream merges stay clean.
        df.dropna(subset=["Date"], inplace=True)
        return df

ff5_df = FamaFrenchDownloader.parse_ff5_data()
class MomentumDownloader:
    """Downloads and parses the daily momentum factor dataset (Ken French data library)."""

    MOM_URL = "https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/F-F_Momentum_Factor_daily_CSV.zip"

    @staticmethod
    def download_momentum():
        """Return the raw CSV text lines contained inside the zipped momentum file."""
        # FIX: add a timeout and raise on HTTP errors (consistent with FamaFrenchDownloader).
        response = requests.get(MomentumDownloader.MOM_URL, timeout=30)
        response.raise_for_status()
        with zipfile.ZipFile(io.BytesIO(response.content)) as z:
            file_name = z.namelist()[0]
            with z.open(file_name) as f:
                return f.read().decode("utf-8").splitlines()

    @staticmethod
    def parse_momentum_data():
        """Parse the downloaded lines into a DataFrame with Date and MOM (%) columns."""
        lines = MomentumDownloader.download_momentum()
        # Data rows start with an 8-digit YYYYMMDD date; header/footer lines are skipped.
        data_lines = [line for line in lines if re.match(r'^\s*\d{8}', line)]
        df = pd.read_csv(io.StringIO("\n".join(data_lines)), sep=r"\s*,\s*", header=None, engine="python")
        df.columns = ["Date", "MOM"]
        df["Date"] = pd.to_datetime(df["Date"], format="%Y%m%d", errors="coerce")
        df["MOM"] = pd.to_numeric(df["MOM"], errors="coerce")
        df.dropna(subset=["Date", "MOM"], inplace=True)
        return df

mom_df = MomentumDownloader.parse_momentum_data()
class StockDataFetcher:
    """Downloads and processes stock data from Yahoo Finance."""

    @staticmethod
    def get_stock_data(ticker="MSFT", start_date="1990-01-01"):
        """Return a DataFrame with Date, per-ticker adjusted close, and daily % return.

        Price/return columns are suffixed with the ticker so multiple tickers
        can be merged without name collisions.

        Raises:
            ValueError: if Yahoo Finance returns no data for the ticker.
        """
        raw_df = yf.download(ticker, start=start_date, progress=False, group_by="ticker")
        # FIX: an unknown ticker previously failed later with an opaque KeyError;
        # fail fast with a clear message instead.
        if raw_df.empty:
            raise ValueError(f"No price data returned for ticker '{ticker}'")
        # yfinance returns MultiIndex (ticker, field) columns with group_by="ticker";
        # flatten to plain field names since we fetch a single ticker.
        if isinstance(raw_df.columns, pd.MultiIndex):
            raw_df.columns = raw_df.columns.droplevel(0)
        raw_df.reset_index(inplace=True)
        # Prefer dividend/split-adjusted prices when available.
        price_col = "Adj Close" if "Adj Close" in raw_df.columns else "Close"
        daily_return_col = f"Daily Return ({ticker})"
        raw_df[daily_return_col] = raw_df[price_col].pct_change() * 100
        # Rename the price column so each ticker is unique after merging.
        # (FIX: removed the no-op rename of "Date" to "Date".)
        raw_df.rename(columns={price_col: f"Adj Close ({ticker})"}, inplace=True)
        return raw_df[["Date", f"Adj Close ({ticker})", daily_return_col]]
# The synthetic weighted portfolio is treated as a single pseudo-ticker downstream.
ticker = "PORTFOLIO"
# FIX: with no valid ticker-weight pairs, the merge loop left merged_portfolio
# as None and the code crashed with AttributeError; stop with a clear message.
if not portfolio:
    st.error("No valid ticker-weight pairs were entered. Please check the sidebar input.")
    st.stop()
# Build a weighted portfolio
merged_portfolio = None
for (t, w) in portfolio:
    tmp = StockDataFetcher.get_stock_data(t)
    # Weighted contribution of this ticker to the portfolio's daily return.
    tmp[f"Weighted Return ({t})"] = tmp[f"Daily Return ({t})"] * w
    # Merge with main
    if merged_portfolio is None:
        merged_portfolio = tmp.copy()
    else:
        # Merge with suffixes so we don't produce duplicate column names.
        merged_portfolio = pd.merge(
            merged_portfolio, tmp, on="Date", how="outer", suffixes=("", f"_{t}")
        )
merged_portfolio.sort_values("Date", inplace=True)
merged_portfolio.fillna(0.0, inplace=True)  # treat days a ticker is missing as 0% return (alternative: ffill)
# Portfolio return = sum of all per-ticker weighted-return columns.
weight_cols = [col for col in merged_portfolio.columns if "Weighted Return" in col]
merged_portfolio["Portfolio Return"] = merged_portfolio[weight_cols].sum(axis=1)
# Synthetic price level: start at 100 and compound the daily % returns.
merged_portfolio["Portfolio Adj Close"] = 100.0 * (
    1.0 + merged_portfolio["Portfolio Return"] / 100.0
).cumprod()
# Rename to the pseudo-ticker naming scheme expected by the factor merge below.
merged_portfolio.rename(
    columns={
        "Portfolio Return": f"Daily Return ({ticker})",
        "Portfolio Adj Close": "Adj Close"
    },
    inplace=True
)
# Keep only: "Date", "Adj Close", "Daily Return (PORTFOLIO)".
final_cols = ["Date", "Adj Close", f"Daily Return ({ticker})"]
stock_df = merged_portfolio[final_cols].copy()
# Merge portfolio returns with the factor data; inner join keeps overlapping dates only.
merged_factors_df = pd.merge(ff5_df, mom_df, on="Date", how="outer")
final_df = pd.merge(stock_df, merged_factors_df, on="Date", how="inner")
final_df.dropna(inplace=True)
final_df["Date"] = pd.to_datetime(final_df["Date"])
with st.expander("View Merged Data", expanded=False):
    st.dataframe(final_df)
# ==============================================
# 2. Factor Series Visualization
# ==============================================
st.header("2. Factor Series Visualization")
st.write("We plot each factor’s percentage series and a corresponding cumulative level.")
with st.expander("Expand for Factor Series Visualization", expanded=False):
    # Plot every factor column, excluding the portfolio price/return columns.
    exclude_cols = ['Date', 'Adj Close', f"Daily Return ({ticker})"]
    all_cols = [col for col in final_df.columns if col not in exclude_cols]
    cols_to_plot = [col for col in all_cols if col in selected_factors]
    if len(cols_to_plot) == 0:
        cols_to_plot = all_cols  # fall back to all columns if nothing matched the selection
    num_plots = len(cols_to_plot)
    if num_plots == 0:
        st.warning("No factors to plot. Please select at least one factor.")
    else:
        fig = make_subplots(
            rows=num_plots,
            cols=1,
            shared_xaxes=True,
            subplot_titles=cols_to_plot,
            specs=[[{"secondary_y": True}] for _ in range(num_plots)]
        )
        for i, col in enumerate(cols_to_plot, start=1):
            pct_series = final_df[col]
            # Compound the daily % series into a cumulative level index starting at 100.
            log_return = np.log1p(pct_series / 100)
            cum_log_return = log_return.cumsum()
            level_series = 100 * np.exp(cum_log_return - cum_log_return.iloc[0])
            fig.add_trace(
                go.Scatter(
                    x=final_df['Date'],
                    y=pct_series,
                    mode='lines',
                    name=f'{col} %',
                    opacity=0.5,
                    line=dict(color='red', width=2)
                ),
                row=i, col=1, secondary_y=False
            )
            fig.add_trace(
                go.Scatter(
                    x=final_df['Date'],
                    y=level_series,
                    mode='lines',
                    name=f'{col} Level',
                    line=dict(color='blue')
                ),
                row=i, col=1, secondary_y=True
            )
            if level_series.max() > 1e6:
                # BUGFIX: the level series is drawn on the secondary axis, so the
                # log scale must be applied there (it previously targeted the % axis).
                fig.update_yaxes(type="log", row=i, col=1, secondary_y=True)
            fig.update_yaxes(title_text=f"{col} %", row=i, col=1, secondary_y=False)
            fig.update_yaxes(title_text=f"{col} Level", row=i, col=1, secondary_y=True)
        fig.update_xaxes(tickangle=45, title_text="Date")
        fig.update_layout(
            height=600 * num_plots,
            width=2000,
            title_text="Factor Percentage & Level Series",
            showlegend=False,
        )
        st.plotly_chart(fig, use_container_width=True)
# ==============================================
# 3. OLS Regression
# ==============================================
st.header("3. Factor Regression")
st.write(f"We compute {ticker}'s excess returns and regress them on the Fama-French 5-Factor plus Momentum. Below is the regression summary and a detailed interpretation.")
# Excess return = portfolio daily return minus the risk-free rate (both in %).
final_df["Excess Return"] = final_df[f"Daily Return ({ticker})"] - final_df["RF"]
X = final_df[["MKT_RF", "SMB", "HML", "RMW", "CMA", "MOM"]]
X = sm.add_constant(X)  # adds the intercept column so alpha is estimated as 'const'
y = final_df["Excess Return"]
model = sm.OLS(y, X).fit()
st.markdown("**Regression Summary**")
st.text(model.summary().as_text())
# Extract parameters for interpretation in the expander below.
alpha = model.params['const']
alpha_pval = model.pvalues['const']
market_beta = model.params["MKT_RF"]
size_beta = model.params["SMB"]
value_beta = model.params["HML"]
profitability_beta = model.params["RMW"]
investment_beta = model.params["CMA"]
momentum_beta = model.params["MOM"]
sig_threshold = 0.05  # conventional 5% significance level
# Plain-language interpretation of the full-sample regression coefficients.
with st.expander("Expand for Factor Regression Interpretation", expanded=False):
    st.markdown("##### Alpha (Intercept)")
    st.markdown(f"- **Value:** {alpha:.4f}, **p-value:** {alpha_pval:.4f}")
    if alpha_pval < sig_threshold:
        if alpha > 0:
            st.markdown(f"- {ticker} has positive alpha, meaning it has historically outperformed expected returns. This suggests systematic mispricing or missing risk factors.")
        else:
            st.markdown(f"- {ticker} has negative alpha, meaning it has underperformed its risk-adjusted return expectations. This may indicate structural shifts in fundamentals.")
    else:
        st.markdown(f"- Alpha is not statistically significant. {ticker}'s returns are largely explained by market risk and factor exposures.")
    st.markdown("##### Market Beta (MKT_RF)")
    st.markdown(f"- **Beta:** {market_beta:.4f}")
    if market_beta > 1:
        st.markdown(f"- {ticker} is more volatile than the market. It carries higher risk but also offers higher return potential.")
    elif market_beta < 1:
        st.markdown(f"- {ticker} is less volatile than the market, suggesting lower downside risk.")
    else:
        st.markdown(f"- {ticker} moves closely with the market.")
    st.markdown("##### Size Factor (SMB)")
    st.markdown(f"- **Beta:** {size_beta:.4f}")
    if size_beta > 0:
        st.markdown(f"- {ticker} exhibits some small-cap characteristics, meaning it shares behaviors with smaller, riskier firms.")
    else:
        # FIX: original text assumed the subject was a known large company
        # ("aligns with expectations for a company of its size"); the subject
        # here is an arbitrary user-entered portfolio.
        st.markdown(f"- {ticker} behaves like a large-cap stock, sharing behaviors with larger, more established firms.")
    st.markdown("##### Value Factor (HML)")
    st.markdown(f"- **Beta:** {value_beta:.4f}")
    if value_beta > 0:
        # FIX: original text claimed this was "unusual for a tech firm", which
        # does not hold for an arbitrary portfolio.
        st.markdown(f"- {ticker} has some value-stock characteristics. Investors may be treating it like a more stable, established company.")
    else:
        st.markdown(f"- {ticker} behaves like a growth stock, meaning it trades at high valuation multiples and is expected to grow earnings.")
    st.markdown("##### Profitability Factor (RMW)")
    st.markdown(f"- **Beta:** {profitability_beta:.4f}")
    if profitability_beta > 0:
        st.markdown(f"- {ticker} is a high-profitability stock, which aligns with strong margins and earnings quality.")
    else:
        st.markdown(f"- {ticker} has characteristics of lower-profitability firms, which could suggest fundamental weaknesses.")
    st.markdown("##### Investment Factor (CMA)")
    st.markdown(f"- **Beta:** {investment_beta:.4f}")
    if investment_beta > 0:
        st.markdown(f"- {ticker} follows a conservative investment strategy, meaning it is more disciplined in capital allocation.")
    else:
        st.markdown(f"- {ticker} follows an aggressive investment strategy, prioritizing growth and expansion.")
    st.markdown("##### Momentum Factor (MOM)")
    st.markdown(f"- **Beta:** {momentum_beta:.4f}")
    if momentum_beta > 0:
        st.markdown(f"- {ticker} exhibits strong momentum characteristics, meaning past performance trends tend to persist.")
    else:
        st.markdown(f"- {ticker} does not exhibit strong momentum effects and may experience more reversals.")
    st.markdown("##### Trading Implications")
    if alpha_pval < sig_threshold:
        if alpha > 0:
            st.markdown(f"- {ticker} has historically generated positive alpha. Consider maintaining a long position when macro conditions are favorable.")
        else:
            st.markdown(f"- {ticker} has historically generated negative alpha. Consider shorting or hedging in periods of overvaluation.")
    if size_beta < 0 and value_beta < 0 and profitability_beta > 0:
        st.markdown(f"- {ticker} behaves as a large-cap, high-growth, high-profitability stock. Its performance is sensitive to macroeconomic conditions and interest rates.")
    if investment_beta < 0:
        st.markdown(f"- {ticker}'s aggressive reinvestment strategy suggests it could outperform in bullish markets but underperform during economic slowdowns.")
    if momentum_beta > 0:
        st.markdown(f"- {ticker} benefits from trend-following strategies. Consider buying on breakouts rather than mean-reversion.")
# ==============================================
# 4. Rolling Regressions
# ==============================================
st.header("4. Rolling Regressions")
st.write("We use a rolling window to estimate alpha and betas over time, plus the absolute residual error as a measure of fit.")
window = rolling_window
rolling_dates = []
rolling_alphas = []
rolling_residuals = []
rolling_betas = {"MKT_RF": [], "SMB": [], "HML": [], "RMW": [], "CMA": [], "MOM": []}
# FIX: removed the unused `rolling_pvals` dict that was built but never
# populated or read anywhere in the script.
for i in range(window, len(final_df)):
    subset = final_df.iloc[i-window:i]  # trailing window of `window` observations (user-configurable)
    y = subset[f"Daily Return ({ticker})"] - subset["RF"]
    X = subset[["MKT_RF", "SMB", "HML", "RMW", "CMA", "MOM"]]
    X = sm.add_constant(X)
    model = sm.OLS(y, X).fit()
    # Stamp each window's estimates with the date it ends on.
    rolling_dates.append(final_df.iloc[i]["Date"])
    # Alpha (intercept) of this window's regression.
    rolling_alphas.append(model.params["const"])
    # Mean absolute residual — tracks model accuracy over time.
    rolling_residuals.append(np.mean(np.abs(model.resid)))
    # Factor betas for this window.
    for factor in rolling_betas.keys():
        rolling_betas[factor].append(model.params[factor])
rolling_results = pd.DataFrame({
    "Date": rolling_dates,
    "Alpha": rolling_alphas,
    "Error": rolling_residuals
})
for factor in rolling_betas.keys():
    rolling_results[f"{factor} Beta"] = rolling_betas[factor]
rolling_results["Date"] = pd.to_datetime(rolling_results["Date"])  # ensure datetime format
rolling_results.set_index("Date", inplace=True)
# Align the price/return series onto the rolling-regression dates.
stock_prices = final_df.set_index("Date")["Adj Close"].copy()
stock_returns = final_df.set_index("Date")[f"Daily Return ({ticker})"].copy()
stock_prices = stock_prices.reindex(rolling_results.index)
stock_returns = stock_returns.reindex(rolling_results.index)
st.write("#### Rolling Regression Plots")
def get_segments(dates, mask):
    """Collapse a boolean mask over `dates` into (start, end) pairs of contiguous True runs."""
    segments = []
    run_start = None
    previous_date = None
    for date, flag in zip(dates, mask):
        if flag:
            if run_start is None:
                run_start = date  # a new True run begins here
        elif run_start is not None:
            # The run ended on the previous date; record it and reset.
            segments.append((run_start, previous_date))
            run_start = None
        previous_date = date
    if run_start is not None:
        # Mask ended while still inside a run — close it at the final date.
        segments.append((run_start, dates[-1]))
    return segments
# Combined 3-row figure: betas/alpha, price with valuation shading, rolling error.
fig = make_subplots(
    rows=3, cols=1,
    shared_xaxes=True,
    subplot_titles=[
        # FIX: the title previously hard-coded "1-Year Windows" even though the
        # window length is user-configurable.
        f"Rolling Factor Betas & Alpha ({window}-Day Windows)",
        f"{ticker} Stock Price & Returns with Overvalued/Undervalued Regions",
        "Rolling Regression Error (Mean Absolute Residuals)"
    ],
    specs=[
        [{"secondary_y": False}],
        [{"secondary_y": True}],   # price (left axis) + daily returns (right axis)
        [{"secondary_y": False}]
    ]
)
# Subplot 1: Rolling Betas & Alpha
for factor in rolling_betas.keys():
    fig.add_trace(
        go.Scatter(
            x=rolling_results.index,
            y=rolling_results[f"{factor} Beta"],
            mode="lines",
            name=f"{factor} Beta"
        ),
        row=1, col=1
    )
fig.add_trace(
    go.Scatter(
        x=rolling_results.index,
        y=rolling_results["Alpha"],
        mode="lines",
        name="Alpha",
        line=dict(color="black", dash="dash")
    ),
    row=1, col=1
)
# Horizontal reference line at y=0 in subplot 1 (zero alpha/beta level).
fig.add_shape(
    type="line",
    x0=rolling_results.index[0],
    x1=rolling_results.index[-1],
    y0=0,
    y1=0,
    line=dict(color="black", dash="dot", width=1),
    xref="x1", yref="y1"
)
# Subplot 2: Stock Price & Returns with Fill Regions
fig.add_trace(
    go.Scatter(
        x=stock_prices.index,
        y=stock_prices,
        mode="lines",
        name=f"{ticker} Stock Price",
        line=dict(color="blue", width=1.5)
    ),
    row=2, col=1, secondary_y=False
)
fig.add_trace(
    go.Scatter(
        x=stock_returns.index,
        y=stock_returns,
        mode="lines",
        name=f"{ticker} Daily Returns",
        line=dict(color="gray"),
        opacity=0.6
    ),
    row=2, col=1, secondary_y=True
)
# Positive rolling alpha is labeled "undervalued"; zero or negative, "overvalued".
mask_undervalued = (rolling_results["Alpha"] > 0).values
mask_overvalued = (rolling_results["Alpha"] <= 0).values
# stock_prices was reindexed to rolling_results.index, so dates/masks are aligned.
dates_list = list(stock_prices.index)
y_min = stock_prices.min()
y_max = stock_prices.max()
undervalued_segments = get_segments(dates_list, mask_undervalued)
overvalued_segments = get_segments(dates_list, mask_overvalued)
# Add fill traces for undervalued segments (green rectangles spanning the price range).
first = True
for seg in undervalued_segments:
    # Closed rectangle path: (x0,y_min) -> (x1,y_min) -> (x1,y_max) -> (x0,y_max) -> back.
    seg_x = [seg[0], seg[1], seg[1], seg[0], seg[0]]
    seg_y = [y_min, y_min, y_max, y_max, y_min]
    fig.add_trace(
        go.Scatter(
            x=seg_x,
            y=seg_y,
            fill="toself",
            mode="lines",
            line=dict(color="rgba(0,0,0,0)"),  # invisible outline; only the fill shows
            fillcolor="green",
            opacity=0.2,
            showlegend=first,  # only the first segment gets a legend entry
            name="Undervalued"
        ),
        row=2, col=1, secondary_y=False
    )
    first = False
# Add fill traces for overvalued segments (red rectangles).
first = True
for seg in overvalued_segments:
    seg_x = [seg[0], seg[1], seg[1], seg[0], seg[0]]
    seg_y = [y_min, y_min, y_max, y_max, y_min]
    fig.add_trace(
        go.Scatter(
            x=seg_x,
            y=seg_y,
            fill="toself",
            mode="lines",
            line=dict(color="rgba(0,0,0,0)"),
            fillcolor="red",
            opacity=0.2,
            showlegend=first,
            name="Overvalued"
        ),
        row=2, col=1, secondary_y=False
    )
    first = False
# Subplot 3: Rolling Regression Error
fig.add_trace(
    go.Scatter(
        x=rolling_results.index,
        y=rolling_results["Error"],
        mode="lines",
        name="Error",
        line=dict(color="red", width=1.2)
    ),
    row=3, col=1
)
# Dotted reference line at the mean error level for visual comparison.
mean_error = rolling_results["Error"].mean()
fig.add_trace(
    go.Scatter(
        x=[rolling_results.index[0], rolling_results.index[-1]],
        y=[mean_error, mean_error],
        mode='lines',
        name="Mean Error",
        line=dict(color="black", dash="dot", width=1)
    ),
    row=3, col=1
)
# Quarterly ticks on the (shared) bottom x-axis.
fig.update_xaxes(dtick="M3", tickformat="%Y-%m", tickangle=90, row=3, col=1)
fig.update_layout(
    height=1200,
    width=2000,
    showlegend=True,
    title_text="Combined Plotly Figure"
)
st.plotly_chart(fig, use_container_width=True)
# ==============================================
# 5. Stationarity & Mean-Reversion Tests
# ==============================================
st.header("5. Stationarity & Mean-Reversion Tests")
st.write(f"We focus on {ticker}'s rolling alpha to check if it is mean-reverting or not.")
# Work on a NaN-free copy of the rolling alpha series.
df_copy = rolling_results.copy().dropna(subset=['Alpha'])
# 1. Augmented Dickey-Fuller (ADF) Test — H0: unit root (not mean-reverting).
adf_result = adfuller(df_copy['Alpha'])
# 2. Autoregressive Model (AR-1): alpha_t = c + rho * alpha_{t-1} + eps
ar_model = AutoReg(df_copy['Alpha'], lags=1).fit()
# FIX: `params[1]` relied on pandas' deprecated positional fallback for
# label-indexed Series; use explicit positional access instead.
rho = ar_model.params.iloc[1]  # AR(1) coefficient
# 3. Half-Life Calculation — only defined for 0 < |rho| < 1.
# FIX: for |rho| >= 1 the original formula produced a division by zero or a
# meaningless non-positive half-life; report infinity (no mean reversion) instead.
if 0 < abs(rho) < 1:
    half_life = np.log(0.5) / np.log(abs(rho))
else:
    half_life = np.inf
# 4. Variance Ratio Test
def variance_ratio_test(series, lag=4):
    """Return Var(lag-period diff) / (lag * Var(1-period diff)), NaNs excluded.

    A ratio below 1 points to mean reversion; near 1, a random walk.
    """
    clean = series.dropna()
    one_period_var = np.var(clean.diff(1).dropna(), ddof=1)
    lag_period_var = np.var(clean.diff(lag).dropna(), ddof=1)
    return lag_period_var / (one_period_var * lag)
vr_stat = variance_ratio_test(df_copy['Alpha'], lag=4)
# Plain-language interpretation of the three mean-reversion diagnostics.
with st.expander("Stationarity & Mean-Reversion Interpretation", expanded=False):
    st.markdown("##### Augmented Dickey-Fuller (ADF) Test")
    st.markdown(f"**ADF Statistic:** {adf_result[0]:.4f}")
    st.markdown(f"**p-value:** {adf_result[1]:.4f}")
    if adf_result[1] < 0.05:
        st.markdown("- **Alpha is mean-reverting (stationary).**")
        st.markdown("- Alpha tends to return to its mean over time.")
        st.markdown("- This suggests that exploitable inefficiencies exist.")
    else:
        st.markdown("- **Alpha follows a random walk (not mean-reverting).**")
        st.markdown("- Alpha does not consistently revert.")
        st.markdown("- Short-term mean-reversion strategies may be ineffective.")
    st.markdown("##### Autoregressive Model (AR-1)")
    st.markdown(f"**AR(1) Coefficient (rho):** {rho:.4f}")
    # FIX: stationarity of an AR(1) process requires |rho| < 1; the original
    # `rho < 1` check misclassified explosive negative coefficients (rho <= -1)
    # as mean-reverting.
    if abs(rho) < 1:
        st.markdown("- **Alpha exhibits mean-reversion behavior.**")
        st.markdown("- A lower rho means alpha corrects itself over time.")
        st.markdown("- Trading strategies exploiting mispricing may be viable.")
    else:
        st.markdown("- **Alpha behaves like a random walk.**")
        st.markdown("- Alpha does not reliably revert to its mean.")
        st.markdown("- Mispricing trades become less predictable.")
    st.markdown("##### Half-Life of Mean Reversion")
    st.markdown(f"**Half-Life of Alpha:** {half_life:.2f} periods")
    if half_life < 20:
        st.markdown("- **Alpha reverts quickly** (short-term inefficiency).")
        st.markdown("- Mispricing corrections occur fast.")
        st.markdown("- Short-term mean-reversion strategies can work.")
    elif 20 <= half_life < 100:
        st.markdown("- **Alpha reverts at a moderate speed.**")
        st.markdown("- Inefficiencies last for weeks or months.")
        st.markdown("- Consider medium-term strategies.")
    else:
        st.markdown("- **Alpha takes a long time to revert** (persistent mispricing).")
        st.markdown("- Corrections happen slowly.")
        st.markdown("- A longer holding period may be needed.")
    st.markdown("##### Variance Ratio Test")
    st.markdown(f"**Variance Ratio:** {vr_stat:.4f}")
    if vr_stat < 1:
        st.markdown("- **Alpha is mean-reverting.**")
        st.markdown("- Mispricing is temporary and corrects over time.")
        st.markdown("- Systematic strategies may work well.")
    else:
        st.markdown("- **Alpha follows a random walk.**")
        st.markdown("- Arbitrage opportunities may not persist long enough.")
    st.markdown("### Summary")
    # FIX: same |rho| < 1 stationarity condition applied here.
    if adf_result[1] < 0.05 and vr_stat < 1 and abs(rho) < 1:
        st.markdown("- Overall, alpha shows mean-reverting behavior.")
        if half_life < 20:
            st.markdown("- Short-term inefficiencies correct quickly. Focus on fast mean-reversion plays.")
        elif 20 <= half_life < 100:
            st.markdown("- Inefficiencies persist for weeks or months. Use medium-term strategies.")
        else:
            st.markdown("- Alpha takes a long time to revert. Consider long-term value strategies.")
    else:
        st.markdown("- Alpha does not show strong mean-reverting behavior.")
        st.markdown("- Factor-based or macro-driven strategies may be more effective.")
# ==============================================
# 6. Rolling Half-Life Analysis
# ==============================================
st.header("6. Rolling Half-Life Analysis")
st.write(f"This section demonstrates how the half-life of {ticker}'s alpha changes over time.")
# Compute a rolling half-life using a 60-observation window on the alpha series
hl_window = 60
rolling_half_life_vals = []
hl_dates = []
alpha_series = df_copy["Alpha"].values
dates_hl = df_copy.index
for i in range(hl_window, len(alpha_series)):
    window_alpha = alpha_series[i - hl_window:i]
    # Regress alpha_t on alpha_{t-1} within the window; the fitted slope is
    # the local AR(1) coefficient estimate.
    y_curr = window_alpha[1:]
    y_lag = window_alpha[:-1]
    if np.std(y_lag) > 0:
        rho_est = np.polyfit(y_lag, y_curr, 1)[0]
        # Half-life is only defined for 0 < |rho| < 1 (mean-reverting process).
        if 0 < np.abs(rho_est) < 1:
            hl = np.log(0.5) / np.log(np.abs(rho_est))
        else:
            hl = np.nan
    else:
        hl = np.nan  # constant window — the slope is undefined
    rolling_half_life_vals.append(hl)
    hl_dates.append(dates_hl[i])
hl_series = pd.Series(rolling_half_life_vals, index=hl_dates)
overall_half_life_threshold = 145  # Example threshold
# Plot the Rolling Half-Life Chart
fig = go.Figure()
fig.add_trace(
    go.Scatter(
        x=hl_series.index,
        y=hl_series,
        mode='lines',
        line=dict(color='red'),
        name="Rolling Half-Life"
    )
)
fig.add_trace(
    go.Scatter(
        x=[hl_series.index.min(), hl_series.index.max()],
        y=[overall_half_life_threshold, overall_half_life_threshold],
        mode='lines',
        # NOTE(review): a white dashed line is invisible on a light theme —
        # confirm the app is deployed with a dark theme.
        line=dict(color='white', dash='dash'),
        name="Overall Half-Life Threshold"
    )
)
fig.update_layout(
    title=f"Rolling Half-Life of {ticker}'s Alpha Over Time",
    xaxis_title="Time",
    yaxis_title="Half-Life (Periods)",
    width=1200,
    height=500,
    showlegend=True
)
st.plotly_chart(fig, use_container_width=True)
# Save the computed rolling half-life to the DataFrame for interpretation
# (aligns on the date index; the first hl_window rows become NaN).
df_copy["Rolling_Half_Life"] = hl_series
# Interpretation of the rolling half-life series.
with st.expander("Rolling Half-Life Interpretation", expanded=False):
    rolling_half_life_clean = df_copy["Rolling_Half_Life"].dropna()
    # FIX: when every window produced NaN (|rho| outside (0, 1) throughout),
    # .mean()/.iloc[-1] below would warn or raise IndexError; guard explicitly.
    if rolling_half_life_clean.empty:
        st.warning("No valid rolling half-life estimates are available for interpretation.")
    else:
        st.markdown("##### Overall Mean Reversion Behavior")
        average_half_life = rolling_half_life_clean.mean()
        st.markdown(f"**The average rolling half-life of {ticker}'s alpha is {average_half_life:.2f} periods.**")
        if average_half_life < 100:
            st.markdown(f"- {ticker}'s alpha mean-reverts quickly, suggesting market inefficiencies are corrected within a few months.")
        elif 100 <= average_half_life < 200:
            st.markdown(f"- {ticker}'s alpha mean-reverts at a moderate speed, meaning mispricing persists for an extended period before correction.")
        else:
            st.markdown(f"- {ticker}'s alpha has a long half-life, meaning mispricing takes a long time to correct. This may indicate strong inefficiencies or structural shifts.")
        st.markdown("##### Periods of High and Low Mean Reversion")
        high_half_life_periods = rolling_half_life_clean[rolling_half_life_clean > overall_half_life_threshold].count()
        low_half_life_periods = rolling_half_life_clean[rolling_half_life_clean < overall_half_life_threshold].count()
        st.markdown(f"- **Extended half-life (>{overall_half_life_threshold} periods):** {high_half_life_periods} periods")
        st.markdown(f"- **Faster corrections (<{overall_half_life_threshold} periods):** {low_half_life_periods} periods")
        if high_half_life_periods > low_half_life_periods:
            st.markdown("- On average, mispricing tends to persist longer rather than correcting quickly.")
        else:
            st.markdown("- Mispricing corrections occur more frequently than prolonged persistence, indicating relatively efficient price adjustments.")
        st.markdown("##### Market Event Sensitivity")
        recent_half_life = rolling_half_life_clean.iloc[-1]
        st.markdown(f"**The most recent rolling half-life value is {recent_half_life:.2f} periods.**")
        if recent_half_life > overall_half_life_threshold:
            st.markdown(f"- Currently, {ticker}'s alpha is taking longer to mean-revert, suggesting slower market corrections.")
        else:
            st.markdown(f"- Currently, {ticker}'s alpha is mean-reverting quickly, indicating increased market efficiency.")
        st.markdown("##### Trading Implications")
        if recent_half_life > 200:
            st.markdown(f"- {ticker}'s mispricing is persistent. Long-horizon trades are preferable since short-term corrections are less reliable.")
        elif 100 < recent_half_life < 200:
            st.markdown(f"- {ticker}'s alpha corrects in a medium time frame. Structure trades over several months rather than weeks.")
        else:
            st.markdown(f"- {ticker}'s alpha corrects quickly. Short-term mean-reversion strategies may be effective.")
# ==============================================
# 7. Expected vs. Actual Excess Return
# ==============================================
st.header("7. Expected vs. Actual Excess Return")
st.write(f"We compare the model's expected excess return to {ticker}'s actual excess return and measure the fit (R²).")
# PERF: build the Date-indexed view once; the original re-ran
# final_df.set_index("Date") for every factor and again for returns/RF.
final_by_date = final_df.set_index("Date")
# Expected excess return = rolling alpha + sum of (rolling beta * factor return).
factor_contributions = {}
for factor in rolling_betas.keys():
    factor_contributions[factor] = rolling_results[f"{factor} Beta"] * final_by_date.loc[rolling_results.index, factor]
expected_return = rolling_results["Alpha"] + sum(factor_contributions.values())
actual_excess_return = (
    final_by_date.loc[rolling_results.index, f"Daily Return ({ticker})"]
    - final_by_date.loc[rolling_results.index, "RF"]
)
# Calculate R-squared, regression slope, and intercept of actual on expected.
corr_matrix = np.corrcoef(expected_return, actual_excess_return)
r_squared = corr_matrix[0, 1] ** 2
slope, intercept = np.polyfit(expected_return, actual_excess_return, 1)
# Create regression line for the scatter plot
x_line = np.linspace(expected_return.min(), expected_return.max(), 100)
y_line = intercept + slope * x_line
# Build the scatter plot with regression line
fig = go.Figure()
fig.add_trace(
    go.Scatter(
        x=expected_return,
        y=actual_excess_return,
        mode='markers',
        name='Data',
        marker=dict(opacity=0.5)
    )
)
fig.add_trace(
    go.Scatter(
        x=x_line,
        y=y_line,
        mode='lines',
        name='Regression Line',
        line=dict(color='red')
    )
)
fig.update_layout(
    # (dropped the stray f-prefix: the title contains no placeholders)
    title="Scatter Plot of Expected vs. Actual Excess Return (Rolling Factor Model)",
    xaxis_title="Expected Return (Factor Model)",
    yaxis_title="Actual Excess Return",
    width=2000,
    height=800,
    showlegend=True
)
# R² annotation pinned to the top-left of the plotting area.
fig.add_annotation(
    x=0.05,
    y=0.95,
    xref="paper",
    yref="paper",
    text=f"R² = {r_squared:.3f}",
    showarrow=False,
    font=dict(size=12, color="white")
)
st.plotly_chart(fig, use_container_width=True)
# ---------------------------
# Interpretation Section
# ---------------------------
with st.expander("Model Fit and Returns Interpretation", expanded=False):
    # --- R² quality of fit ---
    st.markdown("##### R-Squared Interpretation")
    st.markdown(f"**The model's R² value is {r_squared:.3f}.**")
    if r_squared > 0.8:
        fit_note = f"- The factor model explains a high share of {ticker}'s excess return variability. This suggests systematic risk factors drive returns."
    elif r_squared > 0.5:
        # Reached only when r_squared <= 0.8, so this is the (0.5, 0.8] band.
        fit_note = "- The model shows a moderate fit. Some systematic risk is captured, but other factors likely influence returns."
    else:
        fit_note = "- The model has low explanatory power. Idiosyncratic risks or missing factors may play a significant role."
    st.markdown(fit_note)

    # --- Slope / intercept of the actual-vs-expected regression ---
    st.markdown("##### Regression Line Fit")
    st.markdown(f"**The regression slope is {slope:.3f}, and the intercept is {intercept:.3f}.**")
    if slope > 1:
        slope_note = f"- Actual returns are more sensitive than the model suggests. {ticker}'s price reacts strongly to market conditions."
    elif slope < 1:
        slope_note = "- Actual returns are less sensitive than the model suggests. This could mean smoother price adjustments or lower risk exposure."
    else:
        slope_note = "- The model predicts returns with nearly correct sensitivity."
    st.markdown(slope_note)

    # --- Variance of the model residuals ---
    st.markdown("##### Residual Dispersion")
    residual_variance = np.var(actual_excess_return - expected_return)
    st.markdown(f"**The residual variance is {residual_variance:.3f}.**")
    if residual_variance > 0.05:
        st.markdown("- There is significant unexplained variation. Missing factors or noisy stock-specific movements may exist.")
    else:
        st.markdown("- The model captures most return variability with limited unexplained deviations.")

    # --- What the fit suggests for trading (no message in the 0.5-0.8 band) ---
    st.markdown("##### Trading Implications")
    if r_squared > 0.8:
        st.markdown(f"- Factor exposures explain most of {ticker}'s returns. Mean-reversion strategies using factor models may work.")
    elif r_squared < 0.5:
        st.markdown(f"- Factor models do not strongly explain returns. Consider alternative signals such as earnings reactions or momentum strategies.")
    if slope > 1:
        st.markdown(f"- {ticker} reacts strongly to risk factors, suggesting higher volatility trading strategies may be effective.")
    elif slope < 1:
        st.markdown(f"- {ticker} shows dampened responses, favoring defensive positioning in volatile markets.")
# ==============================================
# 8. Cumulative Factor Contributions
# ==============================================
st.header("8. Cumulative Factor Contributions")
st.write(f"We accumulate each factor’s return contribution over time, plus the 'Stock-Specific Return' (unexplained by the model).")
# Per-factor contribution columns, plus the part of the actual excess return
# the factors do not account for (residual plus rolling alpha).
factor_contributions_df = pd.DataFrame(factor_contributions)
factor_contributions_df["Stock-Specific Return"] = (
    actual_excess_return
    - (expected_return - rolling_results["Alpha"])  # effectively residual
)
factor_contributions_cumsum = factor_contributions_df.cumsum()
# One line trace per cumulative-contribution column.
fig = go.Figure()
for trace_name, series in factor_contributions_cumsum.items():
    fig.add_trace(
        go.Scatter(
            x=series.index,
            y=series,
            mode="lines",
            name=trace_name,
            opacity=0.8
        )
    )
# Dashed zero line spanning the full date range.
chart_dates = factor_contributions_cumsum.index
fig.add_shape(
    type="line",
    x0=chart_dates[0],
    y0=0,
    x1=chart_dates[-1],
    y1=0,
    line=dict(color="white", dash="dash", width=1)
)
fig.update_layout(
    title=f"Cumulative Factor Contributions to {ticker}'s Excess Return",
    xaxis_title="Date",
    yaxis_title="Cumulative Return Contribution",
    width=2000,
    height=800,
    legend=dict(x=1.01, y=1, bordercolor="white", borderwidth=1)
)
# Yearly ticks, angled for readability.
fig.update_xaxes(tickformat="%Y", dtick="M12", tickangle=45)
st.plotly_chart(fig, use_container_width=True)
with st.expander("Expand for Factor Contribution Interpretation", expanded=False):
    # --- Which column ends with the largest absolute cumulative contribution ---
    st.markdown("##### Largest Contributing Factor")
    final_contributions = factor_contributions_cumsum.iloc[-1]
    dominant_factor = final_contributions.abs().idxmax()
    dominant_value = final_contributions[dominant_factor]
    st.markdown(
        f"**The most significant factor contributing to {ticker}'s excess returns is `{dominant_factor}` with a cumulative return contribution of `{dominant_value:.3f}`.**"
    )
    if dominant_factor in {"MKT_RF", "SMB", "HML", "RMW", "CMA", "MOM"}:
        st.markdown(
            f"- {ticker}'s excess returns are largely driven by the **{dominant_factor}** factor, meaning its performance is closely tied to {dominant_factor.lower()} risk exposure."
        )
    else:
        st.markdown(
            f"- {ticker}’s return is less influenced by systematic risk factors and more by stock-specific movements."
        )

    # --- How much of the cumulative return the factors leave unexplained ---
    st.markdown("##### Stock-Specific Return Contribution")
    idio_contribution = final_contributions["Stock-Specific Return"]
    st.markdown(
        f"**The stock-specific return (unexplained return) has a cumulative contribution of `{idio_contribution:.3f}`.**"
    )
    # Reused below for the trading-implications branch.
    idio_dominates = abs(idio_contribution) > abs(dominant_value)
    if idio_dominates:
        st.markdown(
            f"- {ticker}'s excess returns are driven more by stock-specific factors than by the Fama-French factors. This suggests that company fundamentals, earnings surprises, or sentiment play a major role."
        )
    else:
        st.markdown(
            f"- {ticker}'s excess returns are largely explained by systematic risk factors."
        )

    # --- Sign of each column's ending cumulative contribution ---
    st.markdown("##### Factor Importance Over Time")
    positive_factors = [name for name, value in final_contributions.items() if value > 0]
    negative_factors = [name for name, value in final_contributions.items() if value < 0]
    st.markdown(f"- **Factors that positively contributed to {ticker}'s excess returns:** {', '.join(positive_factors)}")
    st.markdown(f"- **Factors that negatively impacted {ticker}'s excess returns:** {', '.join(negative_factors)}")

    # --- Strategy hints derived from the dominant driver and factor signs ---
    st.markdown("##### Trading Implications")
    if idio_dominates:
        st.markdown(
            f"- {ticker}'s returns are mostly idiosyncratic, meaning factor models have limited predictive power. Focus on company-specific events like earnings, product launches, and macroeconomic shifts."
        )
    else:
        st.markdown(
            f"- {ticker}'s returns are largely driven by systematic risk factors, meaning factor-based strategies can be used to predict movements."
        )
    if "MOM" in positive_factors:
        st.markdown(f"- {ticker} has benefited from momentum. Trend-following strategies may be effective.")
    if "HML" in negative_factors:
        st.markdown(f"- {ticker} behaves like a growth stock rather than a value stock. Value-based trading strategies may not work well.")
    if "CMA" in negative_factors:
        st.markdown(f"- {ticker} follows an aggressive investment strategy, meaning its performance may be sensitive to interest rate changes.")
st.success("Analysis Complete. Scroll above to review each section. Adjust parameters in the sidebar to explore further.")
else:
# Landing state: shown until the user clicks "Run Analysis" in the sidebar.
st.info("Set parameters on the sidebar and click 'Run Analysis' to begin. Rolling Regression Analysis takes a few seconds to run.")
# Hide Streamlit's default chrome (hamburger menu and footer) by injecting CSS.
# unsafe_allow_html=True is required for st.markdown to render the raw <style> tag.
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)