import IPython
import IPython.display
import pickle
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from dataclasses import dataclass
import pandas as pd
import numpy as np
import keras_tuner as kt
from tensorflow.keras import regularizers
from tensorflow.keras.layers import LeakyReLU
import gc
from tensorflow.keras.callbacks import Callback
import tensorflow as tf
import os


# For reproducibility: seed every RNG the stack uses.
seed0=1337
np.random.seed(seed0)
# set_random_seed seeds Python, NumPy and TensorFlow in one call; the explicit
# np/tf seeding around it is kept as-is (harmless redundancy).
tf.keras.utils.set_random_seed(seed0)
# Force deterministic TF ops (may slow some GPU kernels).
tf.config.experimental.enable_op_determinism()
tf.random.set_seed(seed0)

# Show full (untruncated) cell contents when displaying pandas frames.
pd.set_option('display.max_colwidth', None)

# Location of the Kaggle "Store Sales" competition data on disk.
DATA_DIR = "D:/Haozip/store-sales-time-series-forecasting"

# List every file shipped with the competition data.
for dirname, _, filenames in os.walk(DATA_DIR):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# Load the raw CSV datasets.
train = pd.read_csv(os.path.join(DATA_DIR, "train.csv"))
test = pd.read_csv(os.path.join(DATA_DIR, "test.csv"))
holiday_events = pd.read_csv(os.path.join(DATA_DIR, "holidays_events.csv"))
oil = pd.read_csv(os.path.join(DATA_DIR, "oil.csv"))
stores = pd.read_csv(os.path.join(DATA_DIR, "stores.csv"))
transactions = pd.read_csv(os.path.join(DATA_DIR, "transactions.csv"))

# Parse the `date` column to datetime in every table that has one
# (stores carries no date column).
for _df in (train, test, holiday_events, oil, transactions):
    _df["date"] = pd.to_datetime(_df["date"])

# Generic helper to plot a (time, value) series.
def plot_series(time, series, format="-", start=0, end=None):
    """Plot series[start:end] against time[start:end] on a fresh figure.

    `format` is a matplotlib format string; `start`/`end` slice both inputs
    (a negative `start` plots only the tail of the data).
    """
    _, ax = plt.subplots(figsize=(14, 5))
    ax.plot(time[start:end], series[start:end], format)
    ax.set_xlabel("Time")
    ax.set_ylabel("Sales")
    ax.grid(True)
    plt.show()
    plt.close()

# Full-history view of raw sales.
plot_series(train["date"], train["sales"])

# Zoom into 2017: plot only the rows dated 2017-01-01 or later.
rows_2017 = int((train["date"] >= pd.to_datetime("2017-01-01")).sum())
plot_series(train["date"], train["sales"], start=-rows_2017)

# Check trend, seasonality and residuals with a classical decomposition.
import statsmodels.api as sm

# Mean sales per day, as a single date-indexed series.
t_series = (
    train.groupby(['date'])['sales']
    .agg(['mean'])
    .reset_index()
    .rename(columns={'mean': 'msales'})
    .set_index('date')
)
# Downsample to month-start means before decomposing.
sample = t_series['msales'].resample('MS').mean()
decomposition = sm.tsa.seasonal_decompose(sample, model='additive')
fig = decomposition.plot()
fig.set_size_inches((14, 6))
fig.tight_layout()
plt.show()
plt.close()


# Helper to merge a base frame with the holiday, oil and stores tables.
def joins(df, holiday_events, oil, stores):
    """Left-join `df` with holidays (by date), oil prices (by date) and
    store metadata (by store number); return the fully merged frame."""
    print(df.shape)
    # Keep a single holiday row per date so the date join stays many-to-one.
    holiday_events = holiday_events.drop_duplicates(subset=['date'], keep='last')
    merged = df.merge(holiday_events, how="left", on='date', validate="many_to_one")
    print(f"Shape after merging with holiday {merged.shape}")
    merged = merged.merge(oil, how="left", on='date')
    print(f"Shape after merging with holiday + oil {merged.shape}")
    merged = merged.merge(stores, how="left", on="store_nbr", suffixes=("holiday", "stores"))
    print(f"Shape after merging with holiday + oil + stores {merged.shape}")
    return merged

# Merge the train/test frames with the holiday, oil and stores tables.
train_merged = joins(train, holiday_events, oil, stores)
display(train_merged.head(1), train_merged.tail(1))

test_merged = joins(test, holiday_events, oil, stores)
display(test_merged.head(1), test_merged.tail(1))

# Derive calendar features from the date column for both datasets.
# day_of_week is shifted to the 1..7 range (pandas: Monday=0, so +1 gives Monday=1).
for _df in (train_merged, test_merged):
    _df['day_of_week'] = _df['date'].dt.day_of_week + 1
    _df['month'] = _df['date'].dt.month
    _df['year'] = _df['date'].dt.year

# Summary statistics and missing-value counts for both merged datasets.
for _df in (train_merged, test_merged):
    display(_df.describe(include="all").T, _df.isna().sum())

# Distribution of holiday types, NaNs included.
display(train_merged["typeholiday"].value_counts(dropna=False))
# Categories of the `transferred` flag coming from the holidays dataset.
display(train_merged["transferred"].unique())

# Convert transferred holidays (and explicit work days) into normal days.
def transfer_holiday_fix(df):
    """Normalize the holiday-type column: a transferred holiday and a
    'Work Day' both behave like a normal day ('NDay'); missing values
    (dates with no holiday row) also become 'NDay'."""
    hol = np.where(df["transferred"] == True, 'NDay', df["typeholiday"])
    hol = np.where(hol == 'Work Day', 'NDay', hol)
    df["typeholiday"] = hol
    df["typeholiday"] = df["typeholiday"].fillna("NDay")
    return df

# Normalize holiday types in both datasets, then verify the new structure.
train_merged = transfer_holiday_fix(train_merged)
test_merged = transfer_holiday_fix(test_merged)
for _df in (train_merged, test_merged):
    display(_df["typeholiday"].value_counts(dropna=False))

# Drop the identifier columns that were only needed for joining.
# store_nbr is deliberately excluded: it is an identifier, and the signal it
# carries is already captured by store type and cluster (transactions are not
# used for feature extraction here).
def select_relevant(df):
    """Keep only the modelling-relevant columns; 'sales' is kept when present
    (train) and skipped when absent (test)."""
    print(f"shape before removing columns {df.shape}")
    features = ["date", "family", "sales", "onpromotion", "typeholiday",
                "dcoilwtico", "city", "state", "typestores", "cluster",
                "day_of_week", "month", "year"]
    if "sales" not in df.columns:
        features.remove("sales")
    df = df[features]
    print(f"Shape after removing columns {df.shape}")
    return df
train_merged = select_relevant(train_merged)
test_merged = select_relevant(test_merged)

# How the raw oil price looks over time (gaps/zeros visible before cleaning).
plot_series(train_merged["date"], train_merged["dcoilwtico"])

# Helper to fill missing oil prices by linear interpolation.
def interpolate_oil(df):
    """Replace zero oil prices with NaN, then fill every gap in
    'dcoilwtico' by linear interpolation (leading/trailing NaNs are
    padded with the nearest valid value via limit_direction='both').

    Mutates and returns `df`.
    """
    print(f"Shape before interpolating {df.shape}")
    # Zero is a placeholder for "no quote", not a real price -> mark missing.
    df["dcoilwtico"] = np.where(df["dcoilwtico"] == 0, np.nan, df["dcoilwtico"])
    # BUG FIX: the original `df.dcoilwtico.interpolate(..., inplace=True)` is a
    # chained in-place call on an attribute access; that pattern is deprecated
    # and silently stops mutating df under pandas copy-on-write. Assign the
    # interpolated column back explicitly instead.
    df["dcoilwtico"] = df["dcoilwtico"].interpolate(limit_direction='both')
    print(f"Shape after interpolating {df.shape}")
    return df

train_merged = interpolate_oil(train_merged)
test_merged = interpolate_oil(test_merged)

# Confirm the interpolation left no missing oil prices.
display(f"Number of nan in train dataset oil price after interpolation {train_merged['dcoilwtico'].isna().sum()}")
display(f"Number of nan in test dataset oil price after interpolation {test_merged['dcoilwtico'].isna().sum()}")

# Oil price after cleaning.
plot_series(train_merged["date"], train_merged["dcoilwtico"])


def plot_by_category(df, category):
    """Plot mean daily sales over time, one line per level of `category`."""
    fig, ax = plt.subplots(figsize=(14, 8))

    daily_means = (
        df.groupby(['date', category])
        .agg({'sales': 'mean'})
        .reset_index()
    )
    sns.lineplot(data=daily_means, x="date", y="sales", hue=category, ax=ax, legend="brief")

    # Shrink the axes by 15% so the legend fits to the right of the plot.
    pos = ax.get_position()
    ax.set_position([pos.x0, pos.y0, pos.width * 0.85, pos.height])
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))

    plt.show()
    plt.close()
# Average-sales-over-time curves, one figure per categorical feature.
categoricals = ['family', 'typeholiday', 'city', 'state', 'typestores', 'cluster']
for cat in categoricals:
    print(cat)
    plot_by_category(train_merged, cat)

# Same treatment for the extracted calendar features, plus a raw plot of
# sales against each time feature.
time_features = ["day_of_week", "month", "year"]
for t_feat in time_features:
    print(t_feat)
    plot_by_category(train_merged, t_feat)
    plot_series(train_merged[t_feat], train_merged["sales"])

# Helper to compare a numeric feature against sales over time.
def plot_by_numeric(df, num_cols):
    """Overlay mean daily sales and the daily mean of `num_cols` on a single
    log-scaled axis."""
    fig, ax = plt.subplots(figsize=(14, 8))
    daily = (
        df[["date", "sales", num_cols]]
        .groupby(['date'])
        .agg({'sales': 'mean', num_cols: 'mean'})
        .reset_index()
    )
    sales_line = sns.lineplot(data=daily, x="date", y='sales', legend='full', ax=ax)
    feat_line = sns.lineplot(data=daily, x="date", y=num_cols, legend='full', ax=ax)
    # Log scale keeps both curves readable despite different magnitudes.
    sales_line.set(yscale='log')
    feat_line.set(yscale='log')
    plt.show()
    plt.close()

# Numeric features (promotions, oil price) against sales over time.
numerics = ['onpromotion', 'dcoilwtico']
for num_col in numerics:
    print(num_col)
    plot_by_numeric(train_merged, num_col)

# XGBoost
# Time-based train/validation split: rows dated on/after 2017-01-01 form the
# validation set. Assumes the frame is sorted by date.
def split(df):
    """Return (X_train, y_train, X_valid, y_valid) with the validation part
    covering everything from 2017-01-01 onwards."""
    useful_features = ['date', 'family', 'typeholiday', 'onpromotion',
                       'dcoilwtico', 'city', 'typestores', 'cluster',
                       'day_of_week', 'month', 'year']
    # Rows before the cutoff = total rows - rows on/after 2017-01-01.
    cutoff = df.shape[0] - len(df["date"][df["date"] >= pd.to_datetime("2017-01-01")])
    train_part = df[:cutoff]
    val_part = df[cutoff:]
    return (train_part[useful_features], train_part["sales"],
            val_part[useful_features], val_part["sales"])

# Apply the time-based split to the merged training data.
X_train, y_train, X_valid, y_valid =split(train_merged)

# Helpers so FunctionTransformer can report feature names back to sklearn.
# sklearn calls feature_names_out(transformer, input_features); returning the
# input names unchanged is correct because sin/cos keep one output per input.
def f_out_sin(self, input_features):
    return input_features


def f_out_cos(self, input_features):
    return input_features

# Encode cyclic time features as sine/cosine pairs so that e.g. day 7 and
# day 1 end up close together in feature space.
def sin_transformer(period):
    """FunctionTransformer mapping x -> sin(x / period * 2*pi)."""
    return FunctionTransformer(lambda x: np.sin(x / period * 2 * np.pi),
                               feature_names_out=f_out_sin)


def cos_transformer(period):
    """FunctionTransformer mapping x -> cos(x / period * 2*pi)."""
    return FunctionTransformer(lambda x: np.cos(x / period * 2 * np.pi),
                               feature_names_out=f_out_cos)

# Pipeline that turns the raw time columns into cyclic sin/cos features and
# then adds pairwise interaction terms between them.
_cyclic_transformers = []
for _feat, _period in (("day_of_week", 7), ("month", 12), ("year", 365)):
    _cyclic_transformers.append((f"{_feat}_sin", sin_transformer(_period), [_feat]))
    _cyclic_transformers.append((f"{_feat}_cos", cos_transformer(_period), [_feat]))

time_feat = make_pipeline(
    ColumnTransformer(_cyclic_transformers, remainder='drop'),
    # interaction_only: products of distinct sin/cos terms, no squared terms.
    PolynomialFeatures(degree=2, interaction_only=True, include_bias=False),
)


# names of columns to train xgb
col_names_classic_ml=['family', 'typeholiday','onpromotion', 'dcoilwtico', 'city', 'typestores',
           'cluster', 'day_of_week','month','year']

# names of columns after pipeline transformations,
# note ordering of this list isn't arbitrary.
# I manually adjusted ordering after getting feature names out of pipeline and verfiying ordering
col_names_classic_ml_transformed=['family', 'typeholiday','city', 'typestores','cluster',
       'day_of_week_sin','day_of_week_cos', 'month_sin', 'month_cos','year_sin', 'year_cos',
       'day_of_week_sin day_of_week_cos','day_of_week_sin month_sin', 'day_of_week_sin month_cos',
       'day_of_week_sin year_sin', 'day_of_week_sin year_cos','day_of_week_cos month_sin',
       'day_of_week_cos month_cos','day_of_week_cos year_sin', 'day_of_week_cos year_cos','month_sin month_cos','month_sin year_sin','month_sin year_cos','month_cos year_sin','month_cos year_cos', 'year_sin year_cos',
       'onpromotion', 'dcoilwtico']

# Build the full feature-engineering pipeline:
#   1. 'encoder'    — encode categoricals and cyclic time features
#                     (category_feat / category_columns are defined elsewhere
#                     in this notebook — presumably a categorical encoder over
#                     the category columns; verify there);
#   2. 'scaler'     — scale every feature with MinMaxScaler;
#   3. 'pandarizer2'— wrap the resulting array back into a labelled DataFrame.
# NOTE(review): pandarizer2 relies on col_names_classic_ml_transformed matching
# the transformer output order exactly — re-verify if the encoder changes.
preprocess_pipe = Pipeline(steps=[
    ('encoder', ColumnTransformer(
                    transformers=[
                        ("category_trans",category_feat,category_columns),
                        ("time_trans",time_feat,["day_of_week","month","year"] ),
                                ],
                                remainder="passthrough", verbose_feature_names_out=True
                            )),
    ('scaler', MinMaxScaler()),
    ("pandarizer2", FunctionTransformer(lambda x: pd.DataFrame(x, columns =  col_names_classic_ml_transformed))) ],verbose = True)

# Fit the preprocessing pipeline on the training features only (fitting on
# validation data would leak information), then transform both splits.
# BUG FIX: the original code called .fit() twice in a row with identical
# arguments; the redundant second fit has been removed.
preprocess_pipe.fit(X_train[col_names_classic_ml], y_train)
X_train = preprocess_pipe.transform(X_train[col_names_classic_ml])
X_valid = preprocess_pipe.transform(X_valid[col_names_classic_ml])

# Sanity-check the transformed frames.
display(X_train.head(2), X_train.tail(2), X_train.shape)
display(X_valid.head(2), X_valid.tail(2), X_valid.shape)


