import IPython
import IPython.display
import pickle
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from dataclasses import dataclass
import pandas as pd
import numpy as np
import keras_tuner as kt
from tensorflow.keras import regularizers
from tensorflow.keras.layers import LeakyReLU
import gc
from tensorflow.keras.callbacks import Callback
import tensorflow as tf
import os

# Make working copies of the previously prepared train/test datasets
# (train_merged / test_merged are built in earlier notebook cells).
dnn_df=train_merged.copy()
df_dnn_test_check=test_merged.copy()
# Drop the originals to free memory.
del train_merged
del test_merged
# Derived categorical feature: concatenation of family, city and cluster
# (assumes all three columns are string dtype — TODO confirm upstream).
dnn_df["cat_gen"]=dnn_df["family"]+dnn_df["city"]+dnn_df["cluster"]
# NOTE(review): this reads store_nbr from `train`, not dnn_df, while
# train_merged was just deleted — verify `train` still exists here.
display(f'store_nbr nunique: {train["store_nbr"].nunique()}, family nunique: {dnn_df["family"].nunique()}, city nunique: {dnn_df["city"].nunique()}, cluster nunique: {dnn_df["cluster"].nunique()}, cat_gen nunique: {dnn_df["cat_gen"].nunique()}')

# Full candidate feature list kept for reference; only the two below are used.
#col_names_dnn=['cat_gen', 'family','typeholiday', 'city','typestores', 'cluster','dcoilwtico', 'date','day_of_week', 'month', 'year']
col_names_dnn=['cat_gen', 'date']
dnn_df_x=dnn_df[col_names_dnn]
dnn_df_y=dnn_df["sales"]

# Sub-pipeline for the derived cat_gen feature: target-encode it against
# sales, then bin the encoded values into 130 ordinal buckets via k-means.
category_feat_dnn=Pipeline(steps=[("target_encode",TargetEncoder(target_type="continuous")),
                                 ("combiner",KBinsDiscretizer(n_bins=130, strategy='kmeans',
                                  subsample=None, random_state=seed0,encode='ordinal'))
                                 ])


# Full preprocessing pipeline: apply the cat_gen sub-pipeline, pass every
# other column through unchanged, then re-wrap the ndarray output as a
# DataFrame. ColumnTransformer emits transformed columns first, so the
# output column order matches col_names_dnn (cat_gen, then date).
preprocess_pipe_dnn = Pipeline(steps=[
    ('encoder', ColumnTransformer(
                    transformers=[
                        ("category_trans",category_feat_dnn,["cat_gen"])
                                ],
                                remainder="passthrough", verbose_feature_names_out=True
                            )),
    ("pandarizer2", FunctionTransformer(lambda x: pd.DataFrame(x, columns =  col_names_dnn)))
                            ],verbose = True)

# Cast date to string so the passthrough doesn't coerce it to float.
dnn_df_x = dnn_df_x.astype({'date':'string'})
preprocess_pipe_dnn.fit(dnn_df_x,dnn_df_y)

# Sanity check: confirm the encoder's input/output column ordering.
display(preprocess_pipe_dnn['encoder'].feature_names_in_)
display(preprocess_pipe_dnn['encoder'].get_feature_names_out())

# Apply the fitted preprocessing to the training features.
dnn_df_x=preprocess_pipe_dnn.transform(dnn_df_x)
display(dnn_df_x.head(2),dnn_df_x.tail(2),dnn_df_x.shape )

dnn_training_features= ['cat_gen', 'date']
dnn_df_x=dnn_df_x[dnn_training_features]
# Re-attach the target so rows and labels stay aligned in one frame.
dnn_df_processed=pd.concat([dnn_df_x,dnn_df_y], axis=1)
# Restore date to datetime dtype (it was stringified for the pipeline).
dnn_df_processed["date"] = pd.to_datetime(dnn_df_processed.date)
# float32 halves memory vs float64 and matches TensorFlow's default dtype.
dnn_df_processed["sales"] = dnn_df_processed["sales"].astype(np.float32)
dnn_df_processed.head(2)


# Free the intermediate feature/target frames.
del dnn_df_x
del dnn_df_y

# Inspect class balance of the derived category.
display(dnn_df_processed["cat_gen"].value_counts() , dnn_df_processed["cat_gen"].nunique())

# Apply the same feature engineering + fitted pipeline to the test set.
df_dnn_test_check["cat_gen"]=df_dnn_test_check["family"]+df_dnn_test_check["city"]+df_dnn_test_check["cluster"]
df_dnn_test_check = df_dnn_test_check.astype({'date':'string'})
df_dnn_test_check=preprocess_pipe_dnn.transform(df_dnn_test_check)
df_dnn_test_check=df_dnn_test_check[dnn_training_features]
df_dnn_test_check["date"] = pd.to_datetime(df_dnn_test_check.date)
# Compare class counts: classes present in train but absent from test
# would be removed before modelling.
display(f"test: {df_dnn_test_check['cat_gen'].nunique()} number of class in the derived category")
display(f"train: {dnn_df_processed['cat_gen'].nunique()} number of class in the derived category")

# Hold out the final 95 days of the training time series for validation.
Test_sample_size=95
SPLIT_DATE=pd.to_datetime("2017-08-15")-pd.to_timedelta((Test_sample_size), unit='d')
# Training window: 4 years + 1 day (leap-day allowance) before the split.
Train_sample_size =365*4+1
Training_interval=pd.to_timedelta((Train_sample_size), unit='d')
train_data_begin_date=SPLIT_DATE-Training_interval
display(f"train_data_begin_date: {train_data_begin_date}")
# Discard rows older than the training window.
dnn_df_processed=dnn_df_processed.loc[dnn_df_processed["date"]>=train_data_begin_date]
display(dnn_df_processed.head(2))
# Keep an unscaled copy for later comparison / evaluation of predictions.
dnn_df_processed_unscaled=dnn_df_processed.copy()
# Number of distinct dates for one category (cat_gen == 0), i.e. the
# length of each per-category daily series after grouping by date.
grouped_sample_lenght=len(dnn_df_processed_unscaled[dnn_df_processed_unscaled["cat_gen"]==0].groupby("date")[['cat_gen','sales']].agg({'cat_gen':'mean', 'sales':'median'}))
display(f"lenght of dataframes grouped by cat_gen: {grouped_sample_lenght}")

# Training hyper-parameters.
SPLIT_TIME =grouped_sample_lenght-Test_sample_size  # index where validation starts
WINDOW_SIZE = 16
BATCH_SIZE = 32
SHUFFLE_BUFFER_SIZE = 580

def create_datasets(df):
    """Split *df* into one min-max-scaled daily series per cat_gen class.

    For each distinct ``cat_gen`` value the rows are grouped by date
    (median sales per day), scaled to [0, 1] with a per-category
    MinMaxScaler, and cast to float32.

    Returns:
        tuple(dict, dict): ``{cat_gen: DataFrame}`` of scaled series and
        ``{cat_gen: MinMaxScaler}`` of fitted scalers (kept so predictions
        can be inverse-transformed later).
    """
    per_category = {}
    scalers = {}
    for category in df["cat_gen"].unique():
        # One row per date: mean of cat_gen (constant within the segment),
        # median of sales; the cat_gen column is then dropped.
        grouped = (
            df[df["cat_gen"] == category]
            .groupby("date")[['cat_gen', 'sales']]
            .agg({'cat_gen': 'mean', 'sales': 'median'})
            .drop(columns=['cat_gen'])
        )
        scaler = MinMaxScaler()
        scaler.fit(grouped[["sales"]])
        scalers[category] = scaler
        grouped["sales"] = scaler.transform(grouped[["sales"]]).astype(np.float32)
        per_category[category] = grouped
    return per_category, scalers
# Build the per-category scaled datasets and keep the fitted scalers.
dnn_data_sets, fitted_scalers=create_datasets(dnn_df_processed)

# Print shape info for 10 randomly sampled per-category sub-datasets.
Sample_of_df_dict=np.random.choice([x for x in dnn_data_sets.keys()], 10 ,replace=False)
for key in Sample_of_df_dict:
    display(f"{key} : {dnn_data_sets[key].shape}")

# Verify the train/validation row counts for one category.
# NOTE(review): rows ON the split date are counted as train (<=), so the
# boundary date falls on the train side.
len_test=len(dnn_data_sets[0][dnn_data_sets[0].index <=SPLIT_DATE])
display(f"train size: {len_test}, test size: {dnn_data_sets[0].shape[0]-len_test}")

def parse_df(df):
    """Extract (time steps, sales series) arrays from a per-category frame.

    Args:
        df: DataFrame with a ``sales`` column; rows are assumed to be in
            chronological order.

    Returns:
        tuple: ``times`` — integer step indices 0..len(df)-1, and
        ``series`` — the ``sales`` column as a NumPy array.
    """
    # np.arange replaces the original list-comprehension round trip.
    times = np.arange(df.shape[0])
    series = df["sales"].to_numpy()
    return times, series

def train_val_split(time, series, time_step):
    """Partition aligned *time* and *series* arrays at index *time_step*.

    Returns:
        tuple: (time_train, series_train, time_valid, series_valid) —
        the train halves cover indices [0, time_step), the validation
        halves cover [time_step, end).
    """
    head = slice(None, time_step)
    tail = slice(time_step, None)
    return time[head], series[head], time[tail], series[tail]

def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    """Turn a 1-D series into shuffled (window, next-value) training batches.

    Each example pairs ``window_size`` consecutive values with the value
    that immediately follows them; examples are shuffled, batched, and
    prefetched for training.
    """
    windows = (
        tf.data.Dataset.from_tensor_slices(series)
        .window(window_size + 1, shift=1, drop_remainder=True)
        .flat_map(lambda win: win.batch(window_size + 1))
        .shuffle(shuffle_buffer)
        .map(lambda win: (win[:-1], win[-1]))
    )
    return windows.batch(batch_size).prefetch(1)

def plot_loss_graphs(history, string):
    """Plot a training metric and its validation counterpart over epochs.

    Args:
        history: Keras History object from ``model.fit``.
        string: metric key, e.g. "loss"; "val_" + string must also exist.
    """
    val_key = 'val_' + string
    for key in (string, val_key):
        plt.plot(history.history[key])
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, val_key])
    plt.show()
    plt.close()


def call():
    """Build the training callbacks: early stopping and LR reduction.

    Returns:
        tuple: (EarlyStopping, ReduceLROnPlateau), both monitoring
        validation loss in 'min' mode.
    """
    early_stop = tf.keras.callbacks.EarlyStopping(
        monitor='val_loss',
        patience=20,  # stop after 20 epochs without improvement
        verbose=1,
        mode='min',
        restore_best_weights=True,  # roll back to the best epoch's weights
    )
    plateau_lr = tf.keras.callbacks.ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.5,  # halve the learning rate on plateau
        patience=3,
        verbose=1,
        mode='min',
        min_lr=1e-8,
    )
    return early_stop, plateau_lr

# Plot loss curves for 10 randomly chosen trained models.
# NOTE(review): history_dict is populated by the training loop elsewhere
# in the notebook — confirm it exists before this cell runs.
Sample_of_loss_graphs=np.random.choice([x for x in history_dict.keys()], 10 ,replace=False,)
for key in Sample_of_loss_graphs:
    print(key)
    plot_loss_graphs(history_dict[key], "loss")


def plot_series_multi(time1, series1, series2, label1, label2, format1="--", format2="-", start=0, end=None):
    """Plot two series (e.g. actual vs predicted) over the same time axis.

    Args:
        time1: shared x-axis values.
        series1, series2: y-values to compare, aligned with time1.
        label1, label2: legend labels for series1 / series2.
        format1, format2: matplotlib format strings for each line.
        start, end: optional slice bounds applied to all inputs.
    """
    fig = plt.figure(figsize=(14, 5))
    ax = fig.add_axes([1, 1, 1, 1])
    # Bug fix: format1/format2 were accepted but never passed to plot(),
    # so both lines rendered with default styling.
    ax.plot(time1[start:end], series1[start:end], format1)
    ax.plot(time1[start:end], series2[start:end], format2)
    ax.legend(labels=(label1, label2))
    plt.xlabel("Time")
    plt.ylabel("Sales")
    plt.grid(True)
    plt.show()
    plt.close()