# Basic packages
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random as rd # generating random numbers
import datetime # manipulating date formats
# Viz
import matplotlib.pyplot as plt # basic plotting
import seaborn as sns # for prettier plots

# TIME SERIES
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.stattools import adfuller, acf, pacf,arma_order_select_ic
import statsmodels.tsa.api as smt
import statsmodels.formula.api as smf

import statsmodels.api as sm
from pandas.plotting import autocorrelation_plot
import scipy.stats as scs
import fbprophet as fbp

#====== not used in this script =======
# item_categories:
# item_category_name, item_category_id
# shops:
# shop_name, shop_id
#======================================

# sales:
# date, date_block_num, shop_id, item_id, item_price, item_cnt_day

# item:
# item_name, item_id, item_category_id

# test:
# ID, shop_id, item_id

# sample submission:
# ID, item_cnt_month

def test_statianarity(timeseries):
    """Run the augmented Dickey-Fuller test on *timeseries* and print a summary.

    Parameters
    ----------
    timeseries : array-like (1-d)
        Series to test for stationarity (e.g. monthly sales counts).

    Returns
    -------
    pd.Series
        Test statistic, p-value, lags used, number of observations used,
        plus one 'Critical Value (...)' entry per significance level.
        Also printed to stdout.
    """
    print('Results of Dickey-Fuller Test:')
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(
        dftest[0:4],
        # Fixed typo in the printed label: 'Observeations' -> 'Observations'.
        index=['Test Statistic', 'p-value', '#Lags Used',
               'Number of Observations Used'],
    )
    # Position 4 of the adfuller result is a dict of critical values,
    # keyed by significance level ('1%', '5%', '10%').
    critical_values = dftest[4]
    for key, value in critical_values.items():
        dfoutput['Critical Value (%s)' % key] = value
    print(dfoutput)
    # Return the summary so callers can use it programmatically
    # (previously the function implicitly returned None; no caller
    # visible in this file relies on that).
    return dfoutput

if __name__ == '__main__':
    # Load the competition data files.
    # NOTE(review): paths are relative to the current working directory —
    # confirm the data layout before running.
    sales = pd.read_csv('sales_train.csv.gz')
    item_cat = pd.read_csv('item_categories.csv')
    item = pd.read_csv('items.csv')
    sub = pd.read_csv('sample_submission.csv.gz')
    shops = pd.read_csv('shops.csv')
    test = pd.read_csv('test.csv.gz')

    # Raw dates are strings in day.month.year form; parse them once up front.
    sales['date'] = pd.to_datetime(sales['date'],format="%d.%m.%Y")
    print(sales.info())

    # Aggregate daily rows to one row per (month block, shop, item):
    # earliest date, total units sold, and the price min/max.
    # NOTE(review): an agg spec mixing scalars and lists yields MultiIndex
    # columns, so the ['date','item_cnt_day'] selection and the rename below
    # operate on the top column level — verify Prophet receives plain
    # 'ds'/'y' columns as intended.
    ts = sales.groupby(["date_block_num",'shop_id','item_id']).agg({'date':'min','item_cnt_day':'sum','item_price':['min','max']}).reset_index()

    # Fit Prophet on (ds, y) pairs built from the aggregated counts.
    m = fbp.Prophet()
    m.fit(ts[['date','item_cnt_day']].rename(columns={'date':'ds','item_cnt_day':'y'}))

    # Forecast one period ahead at month-start frequency ('MS').
    future = m.make_future_dataframe(1,freq='MS')
    p = m.predict(future)

    # Draw 10 random item ids in [0, max item_id].
    # NOTE(review): itemid is assigned but never used — the loop body looks
    # truncated or unfinished; confirm against the full file.
    for i in range(10):
        itemid = np.random.randint(0,item['item_id'].max()+1)
