#!/usr/bin/env python
# coding: utf-8

# # Data preparation

# In[17]:


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime


# In[18]:


# All competition files live in one download folder; build every path from a
# single base constant so the location only needs changing in one place.
_DATA_DIR = r"D:\UsersData\WeChat\WeChat Files\wxid_o4wfhvzdw46622\FileStorage\File\2023-10\store-sales-time-series-forecasting"

train_path = _DATA_DIR + r"\train.csv"
transactions_path = _DATA_DIR + r"\transactions.csv"
stores_path = _DATA_DIR + r"\stores.csv"
oil_path = _DATA_DIR + r"\oil.csv"
holidays_events_path = _DATA_DIR + r"\holidays_events.csv"


# In[19]:


# Load the raw competition CSVs used by the exploratory analysis below.
train_df = pd.read_csv(train_path)
holidays_events_df = pd.read_csv(holidays_events_path)
oil_df = pd.read_csv(oil_path)
store_df = pd.read_csv(stores_path)


# # Oil price correlation

# In[20]:


# Relationship between daily total sales and the WTI oil price.
merged_df = pd.merge(train_df, oil_df, on='date', how='left')

# Aggregate to one total-sales figure per day: the original plotted every raw
# train row (thousands of points per date), and this daily aggregate was
# computed but never used.
oil_sales = merged_df.groupby('date')['sales'].sum().reset_index()
oil_sales = pd.merge(oil_sales, oil_df, on='date', how='left')

# Keep only days where both series are observed.
oil_sales.dropna(subset=['sales', 'dcoilwtico'], inplace=True)

fig, ax1 = plt.subplots(figsize=(12, 6))

# Left axis: daily total sales. Label colour fixed to match the plotted line
# (was 'tab:red' while the line was drawn in 'tab:orange').
ax1.set_xlabel('Date')
ax1.set_ylabel('Sales', color='tab:orange')
sales_line, = ax1.plot(oil_sales['date'], oil_sales['sales'], color='tab:orange', label='Sales')
ax1.tick_params(axis='y', labelcolor='tab:orange')

# Right axis: oil price on its own scale.
ax2 = ax1.twinx()
ax2.set_ylabel('Oil Price (dcoilwtico)', color='tab:blue')
oil_line, = ax2.plot(oil_sales['date'], oil_sales['dcoilwtico'], color='tab:blue', label='Oil Price')
ax2.tick_params(axis='y', labelcolor='tab:blue')

plt.title('Sales and Oil Price Over Time')
fig.legend(handles=[sales_line, oil_line], loc='upper right')
plt.show()


# In[21]:


# Inspect how promotion counts relate to sales, split into three buckets.
plt.figure(figsize=(4, 4))
# Rows where 'onpromotion' is exactly 10
sns.scatterplot(x='onpromotion', y='sales', data=train_df[train_df['onpromotion'] == 10], color='red', label='Promotion Count = 10')
# Rows where 'onpromotion' is greater than 10
sns.scatterplot(x='onpromotion', y='sales', data=train_df[train_df['onpromotion'] > 10], color='blue', label='Promotion Count > 10')
# Rows with no promotion at all
sns.scatterplot(x='onpromotion', y='sales', data=train_df[train_df['onpromotion'] == 0], color='green', label='No Promotion')
plt.title('Sales vs. Number of Promotions')
plt.xlabel('Number of Promotions')
plt.ylabel('Sales')
plt.legend()
plt.show()


# In[22]:


# One 0/1 indicator column per holiday type, merged onto the train rows.
holidays_events_df['date'] = pd.to_datetime(holidays_events_df['date'])
train_df['date'] = pd.to_datetime(train_df['date'])

holiday_types = ['Holiday', 'Additional', 'Bridge', 'Work Day', 'Transfer', 'Event']
for holiday_type in holiday_types:
    holidays_events_df[holiday_type] = (holidays_events_df['type'] == holiday_type).astype(int)

# BUG FIX: several events can fall on the same date; merging the raw holiday
# rows would duplicate every train row for such dates. Collapse to one row
# per date (logical OR via max) before merging.
holiday_flags = holidays_events_df[['date'] + holiday_types].groupby('date').max().reset_index()

train_df = pd.merge(train_df, holiday_flags, on='date', how='left')
train_df[holiday_types] = train_df[holiday_types].fillna(0).astype(int)

train_df.head()


# In[23]:


# Mean sales on each holiday type vs. days with no holiday at all.
holiday_sales_means = {}

for holiday_type in holiday_types:
    flagged_rows = train_df[train_df[holiday_type] == 1]
    holiday_sales_means[holiday_type] = flagged_rows['sales'].mean()

# Rows where none of the holiday indicator columns is set.
no_holiday_data = train_df[(train_df[holiday_types].sum(axis=1) == 0)]
holiday_sales_means['No Holiday'] = no_holiday_data['sales'].mean()


fig, ax = plt.subplots()
ax.bar(holiday_sales_means.keys(), holiday_sales_means.values(), color='skyblue')
# Labels fixed: the chart plots MEAN sales, but was titled "Total Sales".
ax.set_title('Average Sales by Holiday Type')
ax.set_xlabel('Holiday Type')
ax.set_ylabel('Average Sales')
plt.xticks(rotation=45)
plt.show()


# In[24]:


# Total sales broken down by holiday locale (National / Regional / Local).
merged_df = pd.merge(train_df, holidays_events_df, on='date', how='left')
locale_sales = (merged_df
                .groupby('locale')['sales']
                .sum()
                .reset_index())

plt.figure(figsize=(4, 4))
plt.bar(locale_sales['locale'], locale_sales['sales'], color='skyblue')
plt.title('Total Sales by Locale')
plt.xlabel('Locale')
plt.ylabel('Total Sales')
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()


# In[25]:


# One-hot encode the product family, then total sales per family column.
train_df = pd.get_dummies(train_df, columns=['family'])

family_types = [col for col in train_df.columns if col.startswith('family_')]
family_sales_sum = {col: train_df.loc[train_df[col] == 1, 'sales'].sum()
                    for col in family_types}

fig, ax = plt.subplots(figsize=(15, 4))
ax.bar(family_sales_sum.keys(), family_sales_sum.values(), color='skyblue')
ax.set_title('Total Sales by Family Type')
ax.set_xlabel('Family Type')
ax.set_ylabel('Total Sales')
plt.xticks(rotation=90)
plt.show()


# # Relationship between date and sales

# In[26]:


# Average sales per weekday.
train_df['date'] = pd.to_datetime(train_df['date'])

# Day of week as an integer column (0 = Monday).
train_df['day_of_week'] = train_df['date'].dt.dayofweek

train_df[['id', 'date', 'day_of_week']]

day_avg_sales = (train_df
                 .groupby('day_of_week')['sales']
                 .mean()
                 .reset_index())

plt.figure(figsize=(10, 6))
plt.bar(day_avg_sales['day_of_week'], day_avg_sales['sales'], color='skyblue')
plt.title('Average Sales by Day')
plt.xlabel('Day')
plt.ylabel('Average Sales')
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()


# In[28]:


# Effect of the calendar month on average sales.
train_df['month'] = train_df['date'].dt.month
month_avg_sales = (train_df
                   .groupby('month')['sales']
                   .mean()
                   .reset_index())

plt.figure(figsize=(6, 4))
plt.bar(month_avg_sales['month'], month_avg_sales['sales'], color='skyblue')
plt.title('Average Sales by Month')
plt.xlabel('Month')
plt.ylabel('Average Sales')
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()


# In[29]:


# Effect of the day of the month on total sales.
train_df['day_of_month'] = train_df['date'].dt.day
day_of_month_sales_sum = (train_df
                          .groupby('day_of_month')['sales']
                          .sum()
                          .reset_index())

plt.figure(figsize=(6, 4))
plt.bar(day_of_month_sales_sum['day_of_month'], day_of_month_sales_sum['sales'], color='skyblue')
plt.title('Total Sales by Day of Month')
plt.xlabel('Day of Month')
plt.ylabel('Total Sales')
plt.xticks(range(1, 32))
# Zoomed y-axis to emphasise day-to-day variation (bars do not start at 0).
plt.ylim(20000000, day_of_month_sales_sum['sales'].max() * 1.1)
plt.tight_layout()
plt.show()


# # Store effects

# In[31]:


# Total sales per individual store.
store_sales_sum = (train_df
                   .groupby('store_nbr')['sales']
                   .sum()
                   .reset_index())

plt.figure(figsize=(6, 4))
plt.bar(store_sales_sum['store_nbr'], store_sales_sum['sales'], color='skyblue')
plt.title('Sales by Store')
plt.xlabel('Store Number')
plt.ylabel('Sales')
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()


# In[32]:


# Average sales per store type (store metadata merged in; merged_df is also
# reused by the city analysis below).
merged_df = pd.merge(train_df, store_df, on='store_nbr', how='left')
type_avg_sales = (merged_df
                  .groupby('type')['sales']
                  .mean()
                  .reset_index())

plt.figure(figsize=(10, 4))
plt.bar(type_avg_sales['type'], type_avg_sales['sales'], color='skyblue')
plt.title('Average Sales by Store Type')
plt.xlabel('Store Type')
plt.ylabel('Average Sales')
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()


# In[34]:


# Average sales per city.
city_avg_sales = (merged_df
                  .groupby('city')['sales']
                  .mean()
                  .reset_index())
plt.figure(figsize=(10, 4))
plt.bar(city_avg_sales['city'], city_avg_sales['sales'], color='skyblue')
plt.title('Average Sales by City')
plt.xlabel('City')
plt.ylabel('Average Sales')
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()


# In[35]:


import numpy as np 
import pandas as pd 

from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error

import datetime
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

from sklearn.metrics import mean_squared_log_error
from xgboost import XGBRegressor
from catboost import CatBoostRegressor

from sklearn.preprocessing import MinMaxScaler
from statsmodels.tsa.deterministic import DeterministicProcess, CalendarFourier

from statsmodels.graphics.tsaplots import plot_pacf

import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)


# In[36]:


# Load train/test for modelling. `infer_datetime_format` is deprecated in
# pandas 2.x (and a no-op for these ISO-formatted dates), so it is dropped.
train = pd.read_csv(r'D:\UsersData\WeChat\WeChat Files\wxid_o4wfhvzdw46622\FileStorage\File\2023-10\store-sales-time-series-forecasting\train.csv', index_col='id', parse_dates=['date'])
test = pd.read_csv(r'D:\UsersData\WeChat\WeChat Files\wxid_o4wfhvzdw46622\FileStorage\File\2023-10\store-sales-time-series-forecasting\test.csv', index_col='id', parse_dates=['date'])


# In[37]:


# Stack train and test so the feature engineering below is applied to both
# at once (test rows have NaN sales and are split off again later).
train_test = pd.concat([train, test], ignore_index=True)


# In[38]:


# Calendar features derived from the transaction date.
train_test.date = pd.to_datetime(train_test.date)

date_accessor = train_test.date.dt
train_test['year'] = date_accessor.year
train_test['month'] = date_accessor.month
train_test['dayofmonth'] = date_accessor.day
train_test['dayofweek'] = date_accessor.dayofweek
train_test['dayname'] = date_accessor.strftime('%A')


# In[40]:


# Daily WTI oil price, indexed by day period. `infer_datetime_format` is
# deprecated in pandas 2.x and unnecessary here, so it is dropped.
oil = pd.read_csv(r'D:\UsersData\WeChat\WeChat Files\wxid_o4wfhvzdw46622\FileStorage\File\2023-10\store-sales-time-series-forecasting\oil.csv', parse_dates=['date'], index_col='date').to_period('D')


# In[41]:


# 7-day rolling mean smooths over days with no quote (weekends/holidays).
oil['avg_oil_7'] = oil['dcoilwtico'].rolling(7).mean()


# In[42]:


# Daily calendar frame joined with the oil features; forward-fill the 7-day
# average across days with no quote.
trends = pd.DataFrame(index=pd.date_range('2013-01-01', '2017-08-31')).to_period('D')
trends = trends.join(oil, how='outer')
# fillna(method='ffill') is deprecated in pandas 2.x; use .ffill() instead.
trends['avg_oil_7'] = trends['avg_oil_7'].ffill()
trends.dropna(inplace=True)


# In[43]:


# Partial autocorrelation of the smoothed oil price guides lag selection.
_ = plot_pacf(trends.avg_oil_7, lags=12)


# In[44]:


# Three autoregressive lags of the smoothed oil price.
N_OIL_LAGS = 3
for lag in range(1, N_OIL_LAGS + 1):
    trends[f'oil_lags7_{lag}'] = trends.avg_oil_7.shift(lag)
trends.dropna(inplace=True)
trends.count()


# In[45]:


# String key used for merging with train_test; the raw price column is no
# longer needed once its rolling mean and lags exist.
trends['date_str'] = trends.index.astype(str)
trends = trends.drop(columns='dcoilwtico')


# In[46]:


trends


# In[47]:


# Fill gaps in the daily oil price (weekends/holidays have no quote).
oil = oil.interpolate(method='linear')
# interpolate() cannot fill a leading NaN; back-fill it from the first
# observed quote. (The original hand-copied row 1 into row 0, which assumed
# row 1 itself was not NaN.)
oil['dcoilwtico'] = oil['dcoilwtico'].bfill()

# One calendar row per day over the full train+test horizon. Derive the
# horizon length from the data instead of hard-coding 1704 days (the two
# are identical for this dataset's 2013-01-01..2017-08-31 range).
start_date = train_test.date.min()
number_of_days = (train_test.date.max() - start_date).days + 1
date_list = [(start_date + datetime.timedelta(days=day)).isoformat() for day in range(number_of_days)]

date = pd.Series(date_list).to_frame()
date.columns = ['date']
date.date = pd.to_datetime(date.date)
date['date_str'] = date.date.astype(str)
oil['date_str'] = oil.index.astype(str)

# Re-index oil onto the daily calendar, interpolate the days that had no
# quote at all, then restore the string merge key.
oil = pd.merge(date, oil, how='left', on='date_str')
oil = oil.set_index('date').dcoilwtico.interpolate(method='linear').to_frame()
oil['date_str'] = oil.index.astype(str)


# In[48]:


# Attach the interpolated oil price to every train/test row via date_str.
train_test['date_str'] = train_test.date.astype(str)
train_test = pd.merge(train_test, oil, how='left', on='date_str')


# In[49]:


# Attach the oil trend/lag features, then drop the helper key.
train_test = pd.merge(train_test, trends, how='left', on='date_str')
train_test = train_test.drop(columns='date_str')


# In[50]:


train_test


# In[51]:


# 7- and 28-day rolling means of the promotion count.
# BUG FIX: the frame stacks all store x family series row-by-row (the lag
# features below shift by 1782 rows per day), so a plain rolling window
# averaged unrelated rows from roughly the same date. Roll within each
# (store_nbr, family) series instead; leading NaNs become 0 as before.
promo_by_series = train_test.groupby(['store_nbr', 'family'])['onpromotion']
train_test['onpromo_7'] = promo_by_series.transform(lambda s: s.rolling(7).mean()).fillna(0)
train_test['onpromo_28'] = promo_by_series.transform(lambda s: s.rolling(28).mean()).fillna(0)


# In[52]:


# Attach store metadata (city, state, type, cluster) to every row.
stores = pd.read_csv(r'D:\UsersData\WeChat\WeChat Files\wxid_o4wfhvzdw46622\FileStorage\File\2023-10\store-sales-time-series-forecasting\stores.csv', index_col='store_nbr')
train_test = train_test.merge(stores, how='left', on='store_nbr')


# In[53]:


# National holidays / events / work days.
holiday = pd.read_csv(r'D:\UsersData\WeChat\WeChat Files\wxid_o4wfhvzdw46622\FileStorage\File\2023-10\store-sales-time-series-forecasting\holidays_events.csv')

# Rows with transferred == True were moved to another date (a matching
# 'Transfer' row exists); keep only the days actually celebrated.
# .copy() avoids SettingWithCopy on the description assignment below.
holiday = holiday.query('transferred ==False').copy()
holiday.description = holiday.description.str.replace('Traslado ', '')

national = holiday.query('locale=="National"')

# BUG FIX: the original filter was `type!="Work Day" or type!="Event"`,
# which is true for EVERY row (no row can be both types at once); `and` is
# required to exclude both Work Day and Event rows from the day-off set.
day_off = national.query('type!="Work Day" and type!="Event"').set_index('date')['description'].to_dict()
train_test['date_str'] = train_test.date.astype(str)
train_test['national_holiday'] = [1 if a in day_off else 0 for a in train_test.date_str]

event = national.query('type=="Event"').set_index('date')['description'].to_dict()
train_test['national_event'] = [1 if a in event else 0 for a in train_test.date_str]

work_day = national.query('type=="Work Day"').set_index('date')['description'].to_dict()
train_test['national_workday'] = [1 if a in work_day else 0 for a in train_test.date_str]

# Saturday (5) and Sunday (6).
train_test['weekend'] = [1 if a >= 5 else 0 for a in train_test.dayofweek]


# In[54]:


# Local and regional holidays: flag rows whose city/state matches the
# holiday's locale_name on that date.
# NOTE(review): the date-keyed dicts keep only the last holiday per date —
# confirm multiple same-date local holidays are not required.
local = holiday.query('locale=="Local"')
local_dic = local.set_index('date').locale_name.to_dict()
train_test['local_holiday'] = [
    1 if day in local_dic and local_dic[day] == city else 0
    for city, day in zip(train_test.city, train_test.date_str)
]

regional = holiday.query('locale=="Regional"')
regional_dic = regional.set_index('date').locale_name.to_dict()
train_test['regional_holiday'] = [
    1 if day in regional_dic and regional_dic[day] == state else 0
    for state, day in zip(train_test.state, train_test.date_str)
]
train_test


# In[55]:


# Model the log of sales (expm1 is applied to the predictions at the end).
train_test['sales'] = np.log1p(train_test['sales'])
# Number of rows on the first date, i.e. store x family series per day.
len(train_test.query('date_str=="2013-01-01"'))


# In[56]:


# NOTE(review): Istest is set False for every row and never set True, so the
# query at the end keeps all rows — confirm whether marking test rows was
# intended.
train_test['Istest'] = False

# The frame appears to hold 1782 store x family series per calendar day, so
# shifting by 1782 * k rows looks k days back within the same series.
SERIES_PER_DAY = 1782
for days_back in [7, 16, 17, 18, 19, 20, 21, 22, 28, 30, 31, 365]:
    train_test[f'Lag_{days_back}'] = train_test['sales'].shift(SERIES_PER_DAY * days_back)

train_test = train_test.query('Istest==False')


# In[57]:


def ewm_features(dataframe, alphas, lags):
    """Add exponentially-weighted moving-average sales features.

    For every (alpha, lag) pair a column named
    ``sales_ewm_alpha_<alpha-without-dot>_lag_<lag>`` is added holding, per
    (store_nbr, family) group, the EWM mean (min_periods=1) of sales
    shifted by ``lag`` rows. Returns the mutated dataframe.
    """
    group_keys = ["store_nbr", "family"]
    for alpha in alphas:
        for lag in lags:
            column = f"sales_ewm_alpha_{str(alpha).replace('.', '')}_lag_{lag}"
            # Bind alpha/lag as defaults so the transform uses this pair.
            smoother = lambda s, a=alpha, l=lag: s.shift(l).ewm(alpha=a, min_periods=1).mean()
            dataframe[column] = dataframe.groupby(group_keys)['sales'].transform(smoother)
    return dataframe
# Smoothing strengths and row lags for the EWM sales features.
ewm_alphas = [0.95, 0.8, 0.65, 0.5]
ewm_lags = [1, 7, 30]
train_test = ewm_features(train_test, ewm_alphas, ewm_lags)


# In[58]:


# Feature/target correlations. numeric_only=True is required: the frame
# still carries string columns (family, dayname, date_str), which
# pandas >= 2.0 no longer silently drops when computing correlations.
corr = train_test.corr(numeric_only=True)
corr['sales'].sort_values(ascending=False)


# In[59]:


# Zero-fill the leading NaNs that the shift/rolling features introduced.
sales_lag_cols = [f'Lag_{k}' for k in (7, 16, 17, 18, 19, 20, 21, 22, 28, 30, 31, 365)]
oil_cols = [f'oil_lags7_{i}' for i in (1, 2, 3)] + ['avg_oil_7']
ewm_cols = [f'sales_ewm_alpha_{a}_lag_{l}'
            for a in ('095', '08', '065', '05')
            for l in (1, 7, 30)]
lags = sales_lag_cols + oil_cols + ewm_cols
train_test[lags] = train_test[lags].fillna(0)


# In[60]:


# The 33 product families in the dataset; one model pair is trained per
# family below.
families = [
    'AUTOMOTIVE', 'BABY CARE', 'BEAUTY', 'BEVERAGES', 'BOOKS', 'BREAD/BAKERY',
    'CELEBRATION', 'CLEANING', 'DAIRY', 'DELI', 'EGGS', 'FROZEN FOODS',
    'GROCERY I', 'GROCERY II', 'HARDWARE', 'HOME AND KITCHEN I',
    'HOME AND KITCHEN II', 'HOME APPLIANCES', 'HOME CARE', 'LADIESWEAR',
    'LAWN AND GARDEN', 'LINGERIE', 'LIQUOR,WINE,BEER', 'MAGAZINES', 'MEATS',
    'PERSONAL CARE', 'PET SUPPLIES', 'PLAYERS AND ELECTRONICS', 'POULTRY',
    'PREPARED FOODS', 'PRODUCE', 'SCHOOL AND OFFICE SUPPLIES', 'SEAFOOD',
]
print(train_test.columns.tolist())


# In[61]:


# Model features: every column except identifiers, raw text fields and the
# target itself.
EXCLUDED_COLUMNS = ('date', 'family', 'sales', 'dayname', 'date_str', 'Istest')
FEATURES = [column for column in train_test.columns if column not in EXCLUDED_COLUMNS]
TARGET = ['sales']


# In[62]:


# Integer-encode the remaining categorical store attributes.
categorical_columns = ['city', 'state', 'type']
for column in categorical_columns:
    train_test[column] = preprocessing.LabelEncoder().fit_transform(train_test[column])


# In[63]:


# Split back into train (sales known) and test (sales NaN — the column was
# absent from test.csv before the concat).
train = train_test[train_test['sales'].notnull()].copy()
test = train_test[train_test['sales'].isnull()].drop(['sales'], axis=1)


# In[64]:


# Train only on the most recent period.
train = train.query("date >= '2016-01-01' ")
train


# In[65]:


# XGBoost hyper-parameters ('lambda'/'alpha' are the native names for
# reg_lambda/reg_alpha).
# NOTE(review): both 'eta' and 'learning_rate' are set; they are aliases of
# the same parameter, so one silently overrides the other — confirm which
# value is intended.
params = {'lambda': 6.105970537016599, 
          'alpha': 0.874716179324655, 
          'eta': 0.047228549789593455, 
          'colsample_bytree': 0.5, 
          'subsample': 0.7, 
          'learning_rate': 0.012, 
          'n_estimators': 1000, 
          'max_depth': 17, 
          'min_child_weight': 155,
          'early_stopping_rounds': 10}


# In[66]:


cat_predictions = []
xgb_predictions = []
y_val_cat = pd.DataFrame()
y_val_xgb = pd.DataFrame()
cat = CatBoostRegressor()
xgb = XGBRegressor(**params)

test_predict = pd.DataFrame()

cat_submit = []
xgb_submit = []

# Train one CatBoost and one XGBoost model per product family, collecting
# validation predictions and test-set predictions for the submission.
for family in families:
    train_family = train.loc[train['family'] == family]
    # shuffle=False keeps the split chronological: the last 5% of each
    # family's rows is held out for validation.
    X_train_family,X_val_family,y_train_family,y_val_family = train_test_split(train_family,
                                                                               train_family[TARGET],
                                                                               test_size=0.05,shuffle=False)
    
    cat.fit(X_train_family[FEATURES], y_train_family, eval_set=[(X_train_family[FEATURES],y_train_family),
                                                               (X_val_family[FEATURES], y_val_family)], 
            verbose=False,early_stopping_rounds=10)
    
    xgb.fit(X_train_family[FEATURES], y_train_family, eval_set=[(X_train_family[FEATURES],y_train_family),
                                                                (X_val_family[FEATURES], y_val_family)],
            verbose=False)
    
    # Clip negative predictions to zero: sales are non-negative and RMSLE
    # requires non-negative inputs.
    cat_pred_family = cat.predict(X_val_family[FEATURES])
    cat_pred_family = [a if a>0 else 0 for a in cat_pred_family]
    cat_predictions.extend(cat_pred_family)
    # BUG FIX: DataFrame.append was removed in pandas 2.0 — use pd.concat.
    y_val_cat = pd.concat([y_val_cat, y_val_family])
    
    xgb_pred_family = xgb.predict(X_val_family[FEATURES])
    xgb_pred_family = [a if a>0 else 0 for a in xgb_pred_family]
    xgb_predictions.extend(xgb_pred_family)
    y_val_xgb = pd.concat([y_val_xgb, y_val_family])
    
    test_family = test.loc[test['family'] == family]
    
    cat_pred_submit = cat.predict(test_family[FEATURES])
    cat_pred_submit = [a if a>0 else 0 for a in cat_pred_submit]
    cat_submit.extend(cat_pred_submit)
    
    xgb_pred_submit = xgb.predict(test_family[FEATURES])
    xgb_pred_submit = [a if a>0 else 0 for a in xgb_pred_submit]
    xgb_submit.extend(xgb_pred_submit)
    
    test_predict = pd.concat([test_predict, test_family])
    
    # NOTE(review): sales were already log1p-transformed, so this is RMSLE
    # computed on log-space values (a double log) — confirm that is intended.
    print(family,'CatBoost RMSLE:', np.sqrt(mean_squared_log_error(y_val_family, cat_pred_family)))
    print(family,'XGB RMSLE:', np.sqrt(mean_squared_log_error(y_val_family, xgb_pred_family)))

# In[67]:


# Number of held-out validation predictions accumulated across all families
# (notebook display).
len(cat_predictions)


# In[68]:


# Attach each model's validation predictions beside the true (log) sales.
y_val_cat['pred'] = cat_predictions
y_val_xgb['pred'] = xgb_predictions
y_val_cat


# In[69]:


y_val_xgb


# In[70]:


# Overall validation RMSLE for each model across all families.
overall_cat_rmsle = np.sqrt(mean_squared_log_error(y_val_cat['sales'], y_val_cat['pred']))
overall_xgb_rmsle = np.sqrt(mean_squared_log_error(y_val_xgb['sales'], y_val_xgb['pred']))
print('Overall CatBoost RMSLE:', overall_cat_rmsle)
print('Overall XGB RMSLE:', overall_xgb_rmsle)


# In[71]:


test


# In[72]:


test_predict


# In[73]:


# Blend the two models' test predictions with equal weight.
len(cat_submit)
len(xgb_submit)
predictions = []
for xgb_pred, cat_pred in zip(xgb_submit, cat_submit):
    predictions.append(0.5 * xgb_pred + 0.5 * cat_pred)
len(predictions)


# In[74]:


test_predict['pred'] = predictions
test_predict


# In[75]:


# Restore ascending row order so predictions line up with submission ids.
test_predict.sort_index(inplace=True)
test_predict


# In[77]:


# Build the submission: undo the log1p transform applied before training.
output = pd.read_csv(r'D:\UsersData\WeChat\WeChat Files\wxid_o4wfhvzdw46622\FileStorage\File\2023-10\store-sales-time-series-forecasting\sample_submission.csv',index_col='id')
# ROBUSTNESS: test_predict is indexed by concat row position while `output`
# is indexed by submission id; the two happen to coincide for this dataset,
# but assigning positionally (rows were sorted ascending above, matching the
# id-sorted sample submission) does not depend on that coincidence.
output['sales'] = np.expm1(test_predict['pred'].to_numpy())


# In[78]:


output


# In[83]:





# In[ ]:




