import os
import datetime
import time
import numpy as np
import pandas as pd
from tool import *
from multiprocessing import Pool
from sklearn.preprocessing import MinMaxScaler,StandardScaler
import warnings
warnings.filterwarnings("ignore")

# season_dicts = { 3:0,4:0,5:0,  6:1,7:1,8:1,  9:2,10:2,11:2, 12:3,1:3,2:3 }
# monthmids_dicts = dict(zip( list(range(1,32)),  [0]*5 + [1]*5 + [2]*5 + [3]*5 + [4]*5 + [5]*6 ))
def data_loader():
    """Build the daily and half-hour customer-flow feature datasets.

    Pipeline:
      1. Load activity / holiday / workday / weather data and the actual +
         original-forecast order volumes (helpers imported from ``tool``).
      2. Merge everything into a per-(shop_id, date) daily table and engineer
         calendar features plus 60-day rolling customer-flow statistics.
      3. Drop rows flagged in ``external_data/abnormal.csv`` and write the
         daily table to ``custflows.csv``.
      4. Join the daily features onto the half-hour actuals and write
         ``custflow.csv``.

    Side effects only (reads helper data and CSVs, writes two CSVs);
    returns None.
    """
    start_t = time.time()
    ###################################################################################
    ##### Load activity, holiday, workday, weather, actual and original forecast data
    activity_df = get_activity_data()
    holidays_df = get_holidays_data()
    workdays_df = get_workdays_data()
    weather_df = get_weather_data()

    actual_df, actual_days_df = get_actual_data()
    forecast_df, forecast_days_df = get_forecast_data()

    ###################################################################################
    ##### Daily-model aggregation and daily-level feature engineering
    actual_days_df = pd.merge(actual_days_df, forecast_days_df, how="left", on=['shop_id', 'date'])
    # Normalise 'YYYY-MM-DD' strings to int YYYYMMDD so later merges/filters agree.
    actual_days_df['date'] = actual_days_df.date.map(lambda x: int(x.replace('-', '')))
    result1 = pd.merge(actual_days_df, workdays_df, how="left", on=['date'])
    result1 = pd.merge(result1, activity_df, how="left", on=['shop_id', 'date'])
    # Binarise: matched activity row -> 1, no match (NaN after left join) -> 0.
    result1.activity = result1.activity.map(lambda x: 0 if pd.isnull(x) else 1)
    result1 = pd.merge(result1, holidays_df, how="left", on=['date'])
    result1 = result1.drop('holiday', axis=1)
    # Calendar features derived from the YYYYMMDD int.
    result1['daily'] = result1.date.map(lambda x: datetime.datetime.strptime(str(x), '%Y%m%d').day)
    result1['weekday'] = result1.date.map(lambda x: datetime.datetime.strptime(str(x), '%Y%m%d').weekday())
    # Days elapsed since 2023-01-01 (linear trend feature).
    result1['duration'] = result1.date.map(lambda x: (datetime.datetime.strptime(str(x), "%Y%m%d") - datetime.datetime.strptime('20230101', "%Y%m%d")).days)
    # 0-based day-of-year.
    result1['year_days'] = result1.date.map(lambda x: (datetime.datetime.strptime(str(x), '%Y%m%d') - datetime.datetime(datetime.datetime.strptime(str(x), '%Y%m%d').year, 1, 1)).days)
    # ISO week number.
    result1['week_no'] = result1.date.map(lambda x: datetime.datetime.strptime(str(x), '%Y%m%d').isocalendar()[1])
    result1['month'] = result1.date.map(lambda x: datetime.datetime.strptime(str(x), '%Y%m%d').month)
    result1['year'] = result1.date.map(lambda x: datetime.datetime.strptime(str(x), '%Y%m%d').year)
    result1['weekend'] = result1.weekday.map(lambda x: 1 if x >= 5 else 0)
    # Fold weekends into the workday flag. NOTE(review): assumes the raw
    # 'workday' column is a 0/1 flag so the sum stays interpretable - confirm.
    result1['workday'] = result1['workday'] + result1['weekend']

    # For every calendar date, measure how deep it sits inside a run of
    # consecutive days sharing the same workday flag (distance to the nearest
    # differing day or the table edge), then subtract the flag value itself.
    frame01 = result1[['date', 'workday']].drop_duplicates()
    frame01 = frame01.sort_values(by='date')
    frame01 = frame01.reset_index(drop=True)
    imp_lst01 = []
    for indd in range(len(frame01)):
        name = frame01.workday[indd]
        imp01 = 0
        up01 = indd + imp01
        down01 = indd - imp01
        while 0 <= up01 < len(frame01) and frame01.workday[up01] == name and 0 <= down01 < len(frame01) and frame01.workday[down01] == name:
            imp01 += 1
            up01 = indd + imp01
            down01 = indd - imp01
        imp01 = imp01 - name
        imp_lst01.append(imp01)
    frame01['imp01'] = imp_lst01
    frame01 = frame01.drop('workday', axis=1)

    result1 = pd.merge(result1, frame01, how="left", on=['date'])
    result1 = pd.merge(result1, weather_df, how="left", on=['shop_id', 'date'])
    # Shift weather levels by 2*workday so workdays and rest days never share a bucket.
    result1.day_weather_level = result1.day_weather_level + result1.workday * 2
    result1.night_weather_level = result1.night_weather_level + result1.workday * 2

    # Lookup tables keyed by 'shop_id_YYYYMMDD' for the closures below; the
    # assert guarantees one row per key so the dicts lose nothing.
    result1['ids'] = result1[['shop_id', 'date']].apply(lambda x: str(x['shop_id']) + '_' + str(x['date']), axis=1)
    assert len(result1) == len(result1['ids'].unique())
    custflows_dicts = dict(zip(result1['ids'].to_list(), result1['custflows'].to_list()))
    workdays_dicts = dict(zip(result1['ids'].to_list(), result1['workday'].to_list()))
    weekdays_dicts = dict(zip(result1['ids'].to_list(), result1['weekday'].to_list()))
    result1 = result1.drop('ids', axis=1)

    def get_previous_workd(meta):
        # Remap weekday: the first workday immediately after a non-workday
        # gets the sentinel value 5; otherwise keep the stored weekday.
        shop_id = meta['shop_id']
        date = meta['date']
        date = datetime.datetime.strptime(str(date), '%Y%m%d')
        previous_day = date - datetime.timedelta(days=1)
        # str() matches how the dict keys were built above (and keeps this
        # working even when shop_id is not already a string).
        id_key01 = str(shop_id) + '_' + previous_day.strftime('%Y%m%d')
        id_key02 = str(shop_id) + '_' + date.strftime('%Y%m%d')
        if id_key01 in workdays_dicts.keys() and id_key02 in workdays_dicts.keys():
            if workdays_dicts[id_key01] == 0 and workdays_dicts[id_key02] == 1:
                return 5
            else:
                return weekdays_dicts[id_key02]
        else:
            return weekdays_dicts[id_key02]
    result1['weekday'] = result1[['shop_id', 'date']].apply(get_previous_workd, axis=1)
    result1 = result1.reset_index(drop=True)

    def get_previous_tuesdayt(meta):
        """Rolling stats over the 60-day window ending on last week's Tuesday.

        Returns a 16-element list:
        [first scaled value, mean/max/min over the first 7 / 14 / 30 points
        and the whole min-max-scaled window, the current flow rescaled into
        the window's range, the window range, the window minimum] - or all
        None when the window holds no usable data.
        """
        shop_id = meta['shop_id']
        date = meta['date']
        current_key = str(shop_id) + '_' + str(meta['date'])
        date = datetime.datetime.strptime(str(date), '%Y%m%d')
        # Step back to the Tuesday of the previous week.
        last_week_tuesday = date - datetime.timedelta(days=date.weekday() + 1)
        last_week_tuesday -= datetime.timedelta(days=5)
        start = (last_week_tuesday - datetime.timedelta(days=59)).strftime('%Y%m%d')
        date_spans = pd.date_range(start=start, end=last_week_tuesday.strftime('%Y%m%d'))
        ids_key = [str(shop_id) + '_' + x.strftime('%Y%m%d') for x in date_spans]
        segment_data60 = [custflows_dicts[k] for k in ids_key if k in custflows_dicts.keys()]
        if len(segment_data60) > 0:
            # Drop gross outliers (outside [0.1x, 10x] of the window mean).
            mean_t = np.mean(segment_data60)
            segment_data60 = [x for x in segment_data60 if 0.1 * mean_t <= x <= 10 * mean_t]
        if len(segment_data60) > 0:
            segment_arr = np.array(segment_data60)
            scaler = MinMaxScaler()  # MinMaxScaler , StandardScaler
            standardized_data = scaler.fit_transform(segment_arr.reshape(-1, 1))
            standardized_data = standardized_data.reshape(-1).tolist()
            # NOTE(review): the window is in ascending date order, so [:7]
            # covers the OLDEST 7 days - confirm that [-7:] (most recent)
            # was not the intent before relying on these names.
            mean07 = np.mean(standardized_data[:7])
            max_t07 = np.max(standardized_data[:7])
            min_t07 = np.min(standardized_data[:7])
            mean14 = np.mean(standardized_data[:14])
            max_t14 = np.max(standardized_data[:14])
            min_t14 = np.min(standardized_data[:14])
            mean30 = np.mean(standardized_data[:30])
            max_t30 = np.max(standardized_data[:30])
            min_t30 = np.min(standardized_data[:30])
            mean60 = np.mean(standardized_data)
            max_t60 = np.max(standardized_data)
            min_t60 = np.min(standardized_data)
            scope_t = max(segment_data60) - min(segment_data60)
            if scope_t > 0:
                # Min-max rescale today's flow into the window's range.
                custflows_tr = (custflows_dicts[current_key] - min(segment_data60)) / scope_t
            elif min(segment_data60) != 0:
                # Flat window: fall back to a ratio against the constant level.
                custflows_tr = custflows_dicts[current_key] / min(segment_data60)
            else:
                # Flat window at zero: the ratio is undefined. The original
                # code raised ZeroDivisionError here; emit None like the
                # other missing-feature cases instead.
                custflows_tr = None
            return [standardized_data[0], mean07, max_t07, min_t07, mean14, max_t14, min_t14, mean30, max_t30, min_t30, mean60, max_t60, min_t60, custflows_tr, scope_t, min(segment_data60)]
        else:
            return [None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None]

    # Explode the 16-element feature list into individual columns.
    segmented_feats = result1[['shop_id', 'date']].apply(get_previous_tuesdayt, axis=1)
    result1['tues_cust'] = segmented_feats.map(lambda x: x[0])
    result1['mean07'] = segmented_feats.map(lambda x: x[1])
    result1['max07'] = segmented_feats.map(lambda x: x[2])
    result1['min07'] = segmented_feats.map(lambda x: x[3])
    result1['mean14'] = segmented_feats.map(lambda x: x[4])
    result1['max14'] = segmented_feats.map(lambda x: x[5])
    result1['min14'] = segmented_feats.map(lambda x: x[6])
    result1['mean30'] = segmented_feats.map(lambda x: x[7])
    result1['max30'] = segmented_feats.map(lambda x: x[8])
    result1['min30'] = segmented_feats.map(lambda x: x[9])
    result1['mean60'] = segmented_feats.map(lambda x: x[10])
    result1['max60'] = segmented_feats.map(lambda x: x[11])
    result1['min60'] = segmented_feats.map(lambda x: x[12])
    result1['custflows_tr'] = segmented_feats.map(lambda x: x[13])
    result1['scope_t'] = segmented_feats.map(lambda x: x[14])
    result1['segment_min'] = segmented_feats.map(lambda x: x[15])

    # Drop rows manually flagged as abnormal before exporting.
    result1 = pd.merge(result1, pd.read_csv('external_data/abnormal.csv'), how="left", on=['shop_id', 'date'])
    result1 = result1[result1.abnormal.isnull()]
    result1 = result1.drop('abnormal', axis=1)

    result2 = result1.sort_values(by=['date'], ascending=[True])
    result2 = result2[result2.date >= 20230101]
    result2.to_csv('custflows.csv', index=False)

    ##################################################################################
    ######## Half-hour aggregation: join the daily features onto half-hour actuals
    actual_df = pd.merge(actual_df, forecast_df, how="left", on=['shop_id', 'date_id', 'start_time'])
    actual_df = actual_df.rename(columns={'date_id': 'date'})
    actual_df['date'] = actual_df.date.map(lambda x: int(x.replace('-', '')))

    result3 = pd.merge(actual_df, result2, how="left", on=['shop_id', 'date'])
    # 'HH:MM' start time -> half-hour slot index 0..47.
    result3['halfhour'] = result3.start_time.map(lambda x: int(x.split(':')[0]) * 2 + int(x.split(':')[1]) // 30)
    result3.custflow = result3.custflow.fillna(0)
    result3 = result3.sort_values(by=['date', 'halfhour'], ascending=[True, True])
    result3 = result3[result3.date >= 20230101]
    result3.to_csv('custflow.csv', index=False)

    print(time.time() - start_t, 'finish!!!')


# Script entry point: rebuild the feature CSVs when run directly.
if __name__ == '__main__':
    data_loader()







