# default_exp problems_chapter_2


#hide
from nbdev.showdoc import *


import pandas as pd

#Load data: daily oxygen demand, indexed by day
df = pd.read_excel('../data/oxygen.xlsx', index_col='Day')
df.plot()


#set window value
N = 7

#Moving Average over the last N days
df['MA'] = df['Demand'].rolling(window=N).mean()

#Result: last moving-average value.
#(.iloc[-1] instead of float(df.tail(1)['MA']): calling float() on a
#one-element Series is deprecated in pandas 2.x)
float(df['MA'].iloc[-1])


#Simple exponential smoothing with alpha=0.1
df['SES'] = df['Demand'].ewm(alpha=0.1, adjust=False).mean()
float(df['SES'].iloc[-1])


import pandas as pd

#Load data: weekly dog-food demand, indexed by week number
df = pd.read_excel('../data/dog-food.xlsx', index_col='Week')

df.plot()


from statsmodels.tsa.api import Holt


#Peek at the first observation
df.head(1)


#Holt's linear-trend model (double exponential smoothing)
holt = Holt(df['Demand'], initialization_method=None)

#Initialization_method equal to None corresponds to  (signal) I_1 = D_1 and (slope) S_1 = D_2 - D_1
I_1, S_1 = df['Demand'].iloc[0], df['Demand'].iloc[1] - df['Demand'].iloc[0]
#Sanity check (displayed, not asserted): the model's initial values should
#match the hand-computed I_1, S_1.
#NOTE(review): assumes initial_values() returns (level, trend, seasonal)
#so [:-1] keeps (level, trend) — confirm against the statsmodels version used
holt.initial_values()[:-1] == (I_1, S_1)


#Fit with fixed smoothing parameters: alpha=0.2 (level), beta=0.1 (trend);
#optimized=False keeps them from being re-estimated
fit = holt.fit(
    smoothing_level=0.2,
    smoothing_trend=0.1,
    optimized=False,
)

df['Holt'] = fit.fittedvalues


#Forecast the next 5 weeks
fcast = fit.forecast(5).rename("Holt's linear trend")

#result: the 5-weeks-ahead forecast
fcast.tail(1)


#Actuals (black), in-sample fit (blue), forecast (green)
df['Demand'].plot(marker='o', color='black', legend=True)
df['Holt'].plot(color='blue', legend=True)
fcast.plot(marker='o', color='green', legend=True)


from statsmodels.api import OLS, add_constant #Ordinary Least Squares


#Linear regression on the week number: Demand = beta_0 + beta_1 * Week
model = OLS(df['Demand'], add_constant(df.index))
results = model.fit()
df['OLS_fitted'] = results.fittedvalues

#Out-of-sample predictions for weeks 27-30
pred_df = pd.DataFrame(index=pd.RangeIndex(27, 31, name='Week'))
pred_df['OLS_prediction'] = results.predict(add_constant(pred_df.index))
print(results.summary())



#Result: prediction for the first future week
pred_df.head(1)


#Actuals (black), fitted line (blue), predictions (green)
df['Demand'].plot(marker='o', color='black', legend=True)
df['OLS_fitted'].plot(color='blue', legend=True)

pred_df['OLS_prediction'].plot(marker='o', color='green', legend=True)


import pandas as pd

from statsmodels.tsa.api import Holt

#Two observed demands, for weeks 1 and 2
D_1, D_2 = 47.2, 52.3
df = pd.DataFrame(
    {'Demand': [D_1, D_2]},
    index=pd.RangeIndex(start=1, stop=3, name='Week'),
)

df.head()


#Holt's linear-trend model on the two observations
holt = Holt(df['Demand'], initialization_method=None)

#Initialization_method equal to None corresponds to  (signal) I_1 = D_1 and (slope) S_1 = D_2 - D_1
demand = df['Demand']
I_1, S_1 = demand.iloc[0], demand.iloc[1] - demand.iloc[0]
#Displayed sanity check of the model's initial (level, trend) values
holt.initial_values()[:-1] == (I_1, S_1)


#Fit Holt's model with fixed alpha=0.2 (level) and beta=0.1 (trend)
fit = holt.fit(
    smoothing_level=0.2,
    smoothing_trend=0.1,
    optimized=False,
)

df['Holt'] = fit.fittedvalues


#Forecast two weeks ahead (weeks 3 and 4)
fcast = fit.forecast(2).rename("Holt's linear trend")

#result: the week-4 forecast
fcast.tail(1)


df['Demand'].plot(marker='o', color='black', legend=True)
df['Holt'].plot(color='blue', legend=True)
fcast.plot(marker='o', color='green', legend=True)


#Same exercise with a third observation added
D_1, D_2, D_3 = 47.2, 52.3, 59.4
df = pd.DataFrame(
    {'Demand': [D_1, D_2, D_3]},
    index=pd.RangeIndex(start=1, stop=4, name='Week'),
)

#Holt's linear trend; initialization_method=None initializes from the data
holt = Holt(df['Demand'], initialization_method=None)

#Fixed smoothing parameters, no re-estimation
fit = holt.fit(smoothing_level=0.2, smoothing_trend=0.1, optimized=False)

#Two-week-ahead forecasts
fcast = fit.forecast(2).rename("Holt's linear trend")

#result
fcast.tail(1)


import pandas as pd
import numpy as np

from statsmodels.tsa.api import ExponentialSmoothing

#Load the monthly potting-soil demand and drop the non-numeric name column
df = pd.read_excel('../data/potting-soil.xlsx', index_col='Month')
df = df.drop(columns=['Name'])

#Replace the integer month index with actual month-end dates starting Jan 2019
df.index = pd.date_range(
    '2019-01-01',
    freq='M',
    periods=len(df),
    name='Month',
)

df.plot(marker='o');


#Holt-Winters: additive trend and additive seasonality, 12-month period,
#with explicitly supplied ('known') initial states
exponential_smoothing = ExponentialSmoothing(
    df['Demand'],
    seasonal_periods=12,
    seasonal='add',
    trend='add',
    freq='M',
    initial_level=df['Demand'].iloc[0],
    initial_trend=df['Demand'].iloc[1] - df['Demand'].iloc[0],
    #first-year demand scaled so the 12 indices average to 1
    #NOTE(review): these look like multiplicative seasonal indices while
    #seasonal='add' — confirm this initialization is intended
    initial_seasonal=12 * df['Demand'][:12] / df['Demand'][:12].sum(),
    initialization_method='known',
)


#exponential_smoothing.fit?


#Fixed smoothing parameters: alpha=0.2 (level), beta=0.1 (trend),
#gamma=0.3 (seasonal)
fit = exponential_smoothing.fit(
    smoothing_level=0.2,
    smoothing_trend=0.1,
    smoothing_seasonal=0.3,
)

df['HoltWinters'] = fit.fittedvalues


#Actuals (black) vs in-sample Holt-Winters fit (blue)
df['Demand'].plot(marker='o', color='black', legend=True)
df['HoltWinters'].plot(color='blue', legend=True)
#fcast.plot(marker='o', color='green', legend=True)


#One-month-ahead forecast
fcast = fit.forecast(1).rename("Holt-Winter's forecast")

#result
fcast.tail(1)


import pandas as pd

#Load data: bottled-water demand per match, with the match-day temperature
df = pd.read_excel(
    '../data/bottled-water.xlsx',
    index_col='Match #',
)


from statsmodels.api import OLS, add_constant


#Regress demand on temperature: Demand = beta_0 + beta_1 * Temp
model = OLS(df['Demand'], add_constant(df['Temp']))
results = model.fit()
df['OLS'] = results.fittedvalues

print(results.summary())



#Label-based access instead of the deprecated positional
#results.params[0]/[1] (integer keys on a labeled Series, pandas 2.x)
print("beta_0: ", results.params['const'])
print("beta_1: ", results.params['Temp'])


import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(8, 6))

ax.plot(df['Temp'], df['Demand'], 'o', label="data")
ax.plot(df['Temp'], df['OLS'], 'r--.', label="OLS")
ax.legend(loc='best');


#Predict demand for the three upcoming matches from their forecast temperatures
pred = pd.DataFrame({'Temp': [21.6, 27.3, 26.6]},
                    index=pd.RangeIndex(39, 42, name='Match #'))
pred['OLS'] = results.predict(add_constant(pred['Temp']))
pred['OLS']


fig, ax = plt.subplots(figsize=(8, 6))

ax.plot(df['Temp'], df['Demand'], 'o', label="data")
ax.plot(pred['Temp'], pred['OLS'], 'o', label="OLS")
ax.legend(loc='best');


import pandas as pd
from sklearn import svm

#Load data: bottled-water demand per match, with the match-day temperature
df = pd.read_excel(
    '../data/bottled-water.xlsx',
    index_col='Match #',
)
#Support-vector regression of demand on temperature (default RBF kernel)
regr = svm.SVR()
regr.fit(
    X=df['Temp'].to_numpy().reshape(-1, 1),
    y=df['Demand'],
)




#Predict demand at the three upcoming match temperatures
pred = pd.DataFrame({'Temp': [21.6, 27.3, 26.6]},
                    index=pd.RangeIndex(39, 42, name='Match #'))

pred['SVR'] = regr.predict(pred['Temp'].to_numpy().reshape(-1, 1))


pred


import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(8, 6))

ax.plot(df['Temp'], df['Demand'], 'o', label="data")
ax.plot(pred['Temp'], pred['SVR'], 'o', label="SVR")  #fix: was mislabeled "OLS"
ax.legend(loc='best');


import numpy as np
import pandas as pd
from sklearn import svm
from statsmodels.tsa.api import ExponentialSmoothing
from statsmodels.tools import eval_measures


#Load data: weekly retail sales per store and department
df = pd.read_csv(
    '../data/retail_sales_data.csv',
    index_col='Date',
)


#Scratch example: boolean mask + .flat indexing (the pattern used in MAPE below)
ex = np.array([1,2,1,0])
l =  ex != 0


p_e = np.full_like(ex,0)
#Keeps only the entries where ex is non-zero
p_e.flat[l]


#Restrict to department 93 of store 2 and parse the date index
df_93_2 = df[(df['Dept'] == 93) & (df['Store'] == 2)]
df_93_2.index = pd.to_datetime(df_93_2.index)

#Resample to weekly frequency (mean of observations within each week)
dff = df_93_2.resample('W').mean()


#Displayed check: did resampling change the number of rows?
dff.shape == df_93_2.shape


from statsmodels.tsa.seasonal import STL

#Season-Trend decomposition using LOESS, to inspect trend and seasonality
stl = STL(dff['Weekly_Sales'])
res = stl.fit()
fig = res.plot()





#Holt-Winters on the weekly sales: additive trend, additive yearly
#seasonality (52 weeks), initial states estimated from the data
exponential_smoothing = ExponentialSmoothing(
    dff['Weekly_Sales'],
    seasonal_periods=52,
    seasonal='add',
    trend='add',
    freq='W',
    initialization_method='estimated',
)


#Fixed smoothing parameters (level, trend, seasonal all 0.15)
fit = exponential_smoothing.fit(
    smoothing_level=0.15,
    smoothing_trend=0.15,
    smoothing_seasonal=0.15,
)

dff['HoltWinters'] = fit.fittedvalues


dff


#Actuals (black) vs in-sample fit (blue)
dff['Weekly_Sales'].plot(marker='o', color='black', legend=True)
dff['HoltWinters'].plot(color='blue', legend=True)
#ffcast.plot(marker='o', color='green', legend=True)


#Larger plot of actuals vs fitted values
ax = dff['Weekly_Sales'].plot(
    figsize=(10, 6),
    marker="o",
    color="black",
    title="Forecasts from Holt-Winters'  method",
)
ax.set_ylabel("Weekly Sales")
ax.set_xlabel("Year")
fit.fittedvalues.plot(ax=ax, style="--", color="red");


#Compare four Holt-Winters variants: additive vs multiplicative seasonality,
#each with and without a damped trend. All use a Box-Cox transform and
#estimated initial states.
aust = dff['Weekly_Sales']
seasonal_periods = 52
fit1 = ExponentialSmoothing(
    aust,
    seasonal_periods=seasonal_periods,
    trend="add",
    seasonal="add",
    use_boxcox=True,
    initialization_method="estimated",
).fit()
fit2 = ExponentialSmoothing(
    aust,
    seasonal_periods=seasonal_periods,
    trend="add",
    seasonal="mul",
    use_boxcox=True,
    initialization_method="estimated",
).fit()
fit3 = ExponentialSmoothing(
    aust,
    seasonal_periods=seasonal_periods,
    trend="add",
    seasonal="add",
    damped_trend=True,
    use_boxcox=True,
    initialization_method="estimated",
).fit()
fit4 = ExponentialSmoothing(
    aust,
    seasonal_periods=seasonal_periods,
    trend="add",
    seasonal="mul",
    damped_trend=True,
    use_boxcox=True,
    initialization_method="estimated",
).fit()
#Collect each variant's estimated parameters and SSE into one table
results = pd.DataFrame(index=[
    r"$\alpha$", r"$\beta$", r"$\phi$", r"$\gamma$", r"$l_0$", "$b_0$", "SSE"
])
params = [
    "smoothing_level",
    "smoothing_trend",
    "damping_trend",
    "smoothing_seasonal",
    "initial_level",
    "initial_trend",
]
results["Additive"] = [fit1.params[p] for p in params] + [fit1.sse]
results["Multiplicative"] = [fit2.params[p] for p in params] + [fit2.sse]
results["Additive Dam"] = [fit3.params[p] for p in params] + [fit3.sse]
results["Multiplica Dam"] = [fit4.params[p] for p in params] + [fit4.sse]

#Plot actuals plus the two undamped fits and their 8-week forecasts
ax = aust.plot(
    figsize=(10, 6),
    marker="o",
    color="black",
    title="Forecasts from Holt-Winters' multiplicative method",
)
ax.set_ylabel("Weekly Sales")
ax.set_xlabel("Year")
fit1.fittedvalues.plot(ax=ax, style="--", color="red")
fit2.fittedvalues.plot(ax=ax, style="--", color="green")

fit1.forecast(8).rename("Holt-Winters (add-add-seasonal)").plot(ax=ax,
                                                                style="--",
                                                                marker="o",
                                                                color="red",
                                                                legend=True)
fit2.forecast(8).rename("Holt-Winters (add-mul-seasonal)").plot(ax=ax,
                                                                style="--",
                                                                marker="o",
                                                                color="green",
                                                                legend=True)

plt.show()
print(
    "Figure 7.6: Forecasting Holt-Winters method with both additive and multiplicative seasonality."
)

#Display the parameter/SSE comparison table
results


#Aliases so the error metrics read as in the text:
#MAD (mean absolute deviation) and MSE (mean squared error)
MAD = eval_measures.meanabs
MSE = eval_measures.mse


def MAPE(y, y_hat, axis=0, zeros=np.nan):
    """
    Mean Absolute Percentage Error (in percent).

    Parameters
    ----------
    y : array_like
       The actual values.
    y_hat : array_like
       The predicted values.
    axis : int
       Unused; kept for interface compatibility with the other metrics
       (the mean is always taken over all elements).
    zeros : float
       Value assigned to the percentage error where ``y`` is zero.
       With the default NaN, any zero actual makes the result NaN.

    Returns
    -------
    mape : float
       Mean Absolute Percentage Error over all elements.
    """
    # Work in float throughout: with integer inputs, np.full_like would
    # build an int buffer and silently truncate every percentage error to 0.
    y = np.asarray(y, dtype=float)
    y_hat = np.asarray(y_hat, dtype=float)
    error = y - y_hat
    # Only divide where the actual value is non-zero
    loc = (y != 0).ravel()
    percentage_error = np.full_like(error, zeros, dtype=float)
    percentage_error.flat[loc] = error.flat[loc] / y.flat[loc]
    mape = np.mean(np.abs(percentage_error)) * 100
    return mape


#Mean absolute deviation of the Holt-Winters fit
MAD(dff['Weekly_Sales'], fit.fittedvalues)


#Mean squared error of the fit
MSE(dff['Weekly_Sales'], fit.fittedvalues)


#Mean absolute percentage error of the fit
MAPE(dff['Weekly_Sales'], fit.fittedvalues)


import pandas as pd  #fix: "from pandas import pandas as pd" fails on modern pandas
from statsmodels.tsa.arima.model import ARIMA

from statsmodels.tools.eval_measures import mse as MSE
from statsmodels.tools.eval_measures import meanabs as MAD


def MA(endog, q, **kwargs):
    """Moving-average model of order ``q``.

    Parameters
    ----------
    endog : array_like
        The observed series.
    q : int
        Order of the moving-average part.
    **kwargs
        Passed through to ``ARIMA``.

    Returns
    -------
    ARIMA
        An (unfitted) ARIMA(0, 0, q) model.
    """
    return ARIMA(endog, order=(0, 0, q), **kwargs)


#Simulate 500 days of stationary demand: D_t = mu + epsilon_t
nsample = 500
mu = 40
sigma = 6
e = np.random.normal(scale=sigma, size=nsample)
idx = pd.date_range(
    '2020-01-01',
    periods=nsample,
    freq='D',
    name='Day',
)
df = pd.DataFrame(index=idx)
df['Demand'] = mu + e  #fix: use mu instead of the hard-coded 40


#Fit an MA(5) model and take its in-sample one-step-ahead fitted values
ma_mod5 = MA(df, 5).fit()
fcast = ma_mod5.fittedvalues
fcast.name = 'MA'


df['Demand'].plot(legend=True)
fcast.plot(legend=True);


MAD(df['Demand'], fcast)


MSE(df['Demand'], fcast)


#Estimated noise standard deviation from the fitted model
np.sqrt(ma_mod5.params['sigma2'])


#For normal errors, sigma is approximately 1.25 * MAD
1.25*MAD(df['Demand'], fcast)


from statsmodels.tsa.api import SimpleExpSmoothing

#Simple exponential smoothing with fixed alpha=0.1 on the simulated demand
fit = SimpleExpSmoothing(df['Demand'], initialization_method="heuristic").fit(
    smoothing_level=0.1,
    optimized=False,
)

fcast = fit.fittedvalues
fcast.name = 'SES'


df['Demand'].plot(legend=True)
fcast.plot(legend=True);


#Mean absolute deviation of the SES fit
MAD(df['Demand'], fcast)


#Mean squared error
MSE(df['Demand'], fcast)


#Root mean squared error
np.sqrt(MSE(df['Demand'], fcast))


#For normal errors, sigma is approximately 1.25 * MAD
1.25 * MAD(df['Demand'], fcast)


import numpy as np
from pprint import pprint 


class Bass:
    """Bass diffusion model with innovation coefficient p, imitation
    coefficient q and market size m.

    Each ``*_peak`` method stores its result in ``self.params``.
    """

    def __init__(self, p, q, m):
        self.p = p  # coefficient of innovation
        self.q = q  # coefficient of imitation
        self.m = m  # total market potential
        self.params = {}

    def time_peak(self):
        """Time at which the adoption rate peaks: ln(q/p) / (p + q)."""
        # np.log instead of np.math.log: the np.math alias was removed in NumPy 2.0
        self.params['time_peak'] = np.log(self.q / self.p) / (self.p + self.q)

    def demand_rate_at_peak(self):
        """Peak adoption rate: m * (p + q)^2 / (4 q)."""
        self.params['demand_rate_at_peak'] = self.m * (self.p +
                                                       self.q)**2 / (4 * self.q)

    def cumulative_demand_at_peak(self):
        """Cumulative adopters at the peak: m * (q - p) / (2 q)."""
        self.params['cumulative_demand_at_peak'] = self.m * (
            self.q - self.p) / (2 * self.q)

    @staticmethod
    def model_at_peak(p, q, m):
        """Compute and pretty-print all three peak statistics."""
        bass = Bass(p, q, m)

        bass.time_peak()
        bass.demand_rate_at_peak()
        bass.cumulative_demand_at_peak()

        pprint(bass.params)


#Bass-model parameters for the Lphone example
p = 0.008  #coefficient of innovation
q = 0.421  #coefficient of imitation
m = 5.8 * 10**6  #market size


#cumulative_demand_at_peak = number of Lphone 5 units sold by the peak
Bass.model_at_peak(p, q, m)



