import datetime
import numpy as np
import pandas as pd
from config import config_scene
# Module-level scenario configuration shared by the loaders below
# (supplies start_day / start_add / end_add used by the extraction code).
config01 = config_scene()


def get_activity_data():
    """Load the precomputed per-shop activity calendar.

    Returns a DataFrame (from active_data/activity_shop.csv) with one row
    per (shop_id, date) combination on which an activity was running,
    flagged by an ``activity`` column.
    """
    # NOTE(review): this table used to be expanded inline from the raw
    # activity windows (external_data/activity_df01.csv / activity_df02.csv,
    # one row per day in each [start_date, end_date] range); the expanded
    # result is now read directly from the precomputed CSV.
    return pd.read_csv('active_data/activity_shop.csv')

def divide_range(start, end, segments):
    """Return `segments` evenly spaced interior points of (start, end).

    The interval is cut into ``segments + 1`` equal sub-intervals and the
    inner boundary points are returned; the endpoints themselves are
    excluded.
    """
    step = (end - start) / (segments + 1)
    points = []
    for k in range(1, segments + 1):
        points.append(start + k * step)
    return points

def get_festival_order(temp_df):
    """Annotate one festival's rows with a 1-based day ordinal.

    Adds a ``festival_order`` column counting the festival's days in row
    order, and fills missing ``holiday_today_cn`` values with 1 (the code
    used for days that are not the holiday day itself).
    """
    temp_df['festival_order'] = range(1, len(temp_df) + 1)
    temp_df['holiday_today_cn'] = temp_df['holiday_today_cn'].fillna(1)
    return temp_df

def get_holidays_data():
    """Build a per-day holiday feature table.

    Expands each holiday record (anchor date plus its lower/upper window)
    from external_data/holidays_df.csv into one row per calendar day,
    attaches the holiday-influence score and the "holiday day itself" flag
    from external_data/holiday_organized.csv, then numbers the days of each
    consecutive festival run (``festival_order``).

    Returns:
        pd.DataFrame with columns: date (int YYYYMMDD), holiday,
        holiday_id, hinfluence, holiday_today_cn, festival_order.
    """
    holidays_df = pd.read_csv('external_data/holidays_df.csv')
    # Keep only the YYYY-MM-DD prefix of the timestamp string.
    holidays_df.ds = holidays_df.ds.map(lambda x: x[:10])
    holiday_influence = pd.read_csv('external_data/holiday_influence.csv')
    holidays_df = pd.merge(holidays_df, holiday_influence, how="left", on=['holiday'])

    # Expand every holiday window into one row per day.
    holidays_lst = []
    for i in range(len(holidays_df)):
        row = holidays_df.iloc[i]
        ds = datetime.datetime.strptime(row.ds, '%Y-%m-%d')
        start = (ds + datetime.timedelta(days=int(row.lower_window))).strftime('%Y-%m-%d')
        end = (ds + datetime.timedelta(days=int(row.upper_window))).strftime('%Y-%m-%d')
        # BUG FIX: the inner loop previously reused the loop variable `i`,
        # shadowing the outer row index; iterate the timestamps directly.
        for ts in pd.date_range(start=start, end=end):
            holidays_lst.append([int(ts.strftime('%Y%m%d')), row.holiday,
                                 row.holiday_id, row.holiday_influence])
    ret_df = pd.DataFrame(holidays_lst, columns=['date', 'holiday', 'holiday_id', 'hinfluence'])
    # When windows overlap, keep the alphabetically-first holiday for a date.
    ret_df = ret_df.sort_values(by='holiday', ascending=True)
    ret_df = ret_df.reset_index(drop=True)
    ret_df = ret_df.drop_duplicates(['date'], keep='first')

    holiday_organized = pd.read_csv('external_data/holiday_organized.csv')
    holiday_organized = holiday_organized[['date', 'holiday_today_cn']]
    # 2 = the holiday day itself, 1 = a surrounding window day.
    holid_dict = {'节日当天': 2, '非节日当天': 1}
    holiday_organized.holiday_today_cn = holiday_organized.holiday_today_cn.map(lambda x: holid_dict[x])
    ret_df = pd.merge(ret_df, holiday_organized, how="left", on=['date'])

    # Number the days within each consecutive run of the same holiday.
    ret_df = ret_df.sort_values(by=['date'], ascending=[True])
    ret_df = ret_df.reset_index(drop=True)
    subsets_df_lst = []
    sub_set = [0]
    for i in range(1, len(ret_df)):
        if ret_df.holiday.loc[i] == ret_df.holiday.loc[i - 1]:
            sub_set.append(i)
        else:
            subsets_df_lst.append(get_festival_order(ret_df.loc[sub_set, :].copy()))
            sub_set = [i]
    # Flush the trailing run.  BUG FIX: the previous guard `len(sub_set) > 0`
    # was always true and crashed with a KeyError on an empty table; guard on
    # the table itself instead.
    if len(ret_df) > 0:
        subsets_df_lst.append(get_festival_order(ret_df.loc[sub_set, :].copy()))
        ret_df = pd.concat(subsets_df_lst, ignore_index=True)

    return ret_df


def get_workdays_data():
    """Load the workday calendar, dropping the audit `inserttime` column."""
    return pd.read_csv('external_data/workdays_df.csv').drop(columns=['inserttime'])


def get_actual_data(spark):
    """Extract per-shop hourly order counts ("custflow") from the warehouse.

    Runs a Spark SQL query over the delivery-order table (self-operated
    channel, regular delivery, non-cancelled orders for two shop_ids),
    deduplicates to the latest snapshot partition (max sdt) per shop/day,
    then appends zero-filled hourly rows covering the forecast horizon
    [start_day + start_add, start_day + end_add] taken from config01.

    Parameters:
        spark: an active SparkSession used to run the extraction SQL.

    Returns:
        (actual_df, actual_days_df) where actual_df holds hourly rows
        (shop_id, date_id, start_time, custflow) and actual_days_df holds
        daily sums (shop_id, date, custflows).
    """
    sql = f"""
    select sdt,date_id,shop_id,concat(hour, ':00') as start_time,count(1) as custflow
    from( select sdt,shop_id,substr(ready_time,1,10)  as date_id,substr(ready_time,12,2)  as hour
        from ods.ods_ofs_delivery_center_t_delivery_order_di
        where 1 = 1
        and sdt >= "20220101"
        and order_channel = '0'          -- 订单渠道(0:自营,1:京东,2:饿了么,3:美团)
        and delivery_type = '0'          -- 配送类型(0:常规,1:再次配送)
        and status not  in ('90','100')   -- 剔除已取消
        and shop_id in ("9M7H","9M8G"))a
    group by sdt,shop_id,date_id,concat(hour, ':00')
    ORDER BY sdt,shop_id, date_id ASC, concat(hour, ':00') ASC
    """
    actual_df = spark.sql(sql).toPandas()
    # actual_df = pd.read_csv('actual_df42.csv')
    # Local snapshot of the raw pull, useful for offline reruns/debugging.
    actual_df.to_csv('actual_df.csv',index=False)
    actual_df.date_id = actual_df.date_id.astype(str)
    actual_df = actual_df[actual_df.date_id >= '2022-01-01']
    # Drop malformed/truncated date strings.
    actual_df = actual_df[actual_df.date_id.map(lambda x: len(x)) >= 8]
    print('actual_df finish!!!')
    # NOTE(review): '9M7G' looks like an old shop id remapped to '9M79', but
    # the SQL above only selects 9M7H/9M8G, so this replace appears vestigial
    # — confirm against the full pipeline.
    actual_df.shop_id.replace('9M7G', '9M79', inplace=True)
    actual_df.date_id = actual_df.date_id.map(lambda x: x[:10])

    # Forecast-horizon boundaries derived from the scenario config.
    start_tuesday = datetime.datetime.strptime(str(config01.start_day), '%Y%m%d')
    start_i_str = (start_tuesday + datetime.timedelta(days=config01.start_add)).strftime('%Y-%m-%d')
    end_i_str = (start_tuesday + datetime.timedelta(days=config01.end_add)).strftime('%Y-%m-%d')
    # exceed_df = actual_df[actual_df.date_id >= start_i_str]
    # assert len(exceed_df) == 0
    # Keep only history strictly before the forecast window.
    actual_df = actual_df[actual_df.date_id < start_i_str]

    # Build a zero-custflow scaffold: one row per hour for every day of the
    # forecast window, so downstream code always sees a complete hourly grid.
    time_df_lst = []
    start_time_lst = ['00:00', '01:00', '02:00', '03:00', '04:00', '05:00', '06:00', '07:00', '08:00', '09:00', '10:00', '11:00', '12:00', '13:00', '14:00', '15:00', '16:00', '17:00', '18:00', '19:00', '20:00', '21:00', '22:00', '23:00']
    day_spans = pd.date_range(start=start_i_str, end=end_i_str)
    for i in range(len(day_spans)):
        time_df = pd.DataFrame(start_time_lst,columns=['start_time'])
        time_df['date_id'] = day_spans[i].strftime('%Y-%m-%d')
        time_df_lst.append(time_df)
    all_time_df = pd.concat(time_df_lst, ignore_index=True)
    all_time_df['custflow'] = 0

    # Deduplicate snapshots: keep only the most recent partition (max sdt)
    # for each (shop_id, date_id) pair, then drop the partition column.
    actual_lst = []
    for name, group in actual_df.groupby(['shop_id', 'date_id']):
        group01 = group[group.sdt == group.sdt.max()]
        actual_lst.append(group01)
    actual_df = pd.concat(actual_lst,ignore_index = True)
    actual_df = actual_df.drop('sdt',axis=1)

    # Append the zero-filled forecast scaffold once per shop.
    actual_lst = []
    for name, group in actual_df.groupby('shop_id'):
        all_time_df01 = all_time_df.copy()
        all_time_df01['shop_id'] = name
        all_time_df01 = pd.concat([group,all_time_df01], ignore_index=True)
        actual_lst.append(all_time_df01)
    actual_df = pd.concat(actual_lst, ignore_index=True)

    # Daily totals per shop (the zero-scaffold days sum to 0).
    actual_days_lst = []
    for name, group in actual_df.groupby(['shop_id', 'date_id']):
        actual_days_lst.append([name[0], name[1], group.custflow.sum()])
    actual_days_df = pd.DataFrame(actual_days_lst,columns=['shop_id','date','custflows'])
    return actual_df,actual_days_df


def get_weather_data(spark):
    """Assemble per-shop daily weather features (history plus 14-day forecast).

    Joins historical city-level weather to shops via dim_shop, unions the
    latest 14-day forecast snapshot, and maps the raw day/night rain
    descriptions onto ordinal weather levels using the external mapping CSVs.

    Parameters:
        spark: an active SparkSession used to run the extraction SQL.

    Returns:
        DataFrame with columns shop_id, date (int YYYYMMDD),
        day_weather_level, night_weather_level.
    """
    weather_day_map = pd.read_csv('external_data/weather_day_map.csv')
    weather_night_map = pd.read_csv('external_data/weather_night_map.csv')
    start_day = datetime.datetime.strptime(str(config01.start_day),'%Y%m%d')
    # NOTE(review): the names read swapped — end_01 (start_day - 2d) is the
    # upper bound of the *history* query and start_01 (start_day - 1d) picks
    # the *forecast* snapshot partition; confirm this offset convention.
    end_01 = (start_day - datetime.timedelta(days=2)).strftime('%Y%m%d')
    start_01 = (start_day - datetime.timedelta(days=1)).strftime('%Y%m%d')
    # NOTE(review): "9M7G" appears twice in the shop_id IN-list below — a
    # duplicate is harmless for IN(), but it looks like a copy/paste slip.
    sql = """
    select shop_id, date_id, day_temperature,night_temperature,day_rain,night_rain,day_wind,night_wind 
        from 
        (
            select shop_id,city_name 
            from dim.dim_shop 
            WHERE shop_id in ("9M7G","9M79","9M4U","9M5W","9M5Y","9M7H","9M7T","9M7W","9M7X","9M5Z","9M7V","9M8L","9M5M","9M6A","9M7F","9M7G","9M7N","9M8J","9M8K","9M6N","9M7E","9M8F","9M6D","9M7L","9M7M","9M7P","9M7R","9M8D","9M8H","9M8I","9M0K","9M6H","9M6I","9M7J","9M7U","9M7Y","9M8G","9M4M","9M7Q","9M8A","9M98","9M7I","9M7S","9M4V")
        ) t1
        join
        (
            select  date_id, city_name, day_temperature,night_temperature,day_rain,night_rain,day_wind,night_wind 
            from  data_mining.data_mining_day_weather_history
            WHERE sdt  BETWEEN  '20230101' and '{0}'

        ) t2 on t1.city_name = t2.city_name
        union all
        select shop_id, date_id, day_temperature,night_temperature,day_rain,night_rain,day_wind,night_wind
        from data_mining.data_mining_14days_weather_forecasts 
        where sdt = '{1}'
    """.format(end_01,start_01)
    weather_df = spark.sql(sql).toPandas()
    # Local snapshot of the raw pull, useful for offline reruns/debugging.
    weather_df.to_csv('weather_df.csv', index=False)
    # weather_df = pd.read_csv('active_data/weather_df.csv')
    print(len(weather_df), weather_df.head(5))

    # 'YYYY-MM-DD' -> int YYYYMMDD, to match the other feature tables.
    weather_df['date']= weather_df.date_id.map(lambda x: int(x.replace('-','')))
    weather_df = weather_df.drop('date_id', axis=1)

    # Map the raw day_rain description to an ordinal level, dropping the
    # raw column and the intermediate category column from the mapping CSV.
    weather_df = pd.merge(weather_df, weather_day_map, how="left", on=['day_rain'])
    weather_df = weather_df.drop('day_rain', axis=1)
    weather_df = weather_df.drop('day_weather_cata', axis=1)

    # Same mapping for the night description.
    weather_df = pd.merge(weather_df, weather_night_map, how="left", on=['night_rain'])
    weather_df = weather_df.drop('night_rain', axis=1)
    weather_df = weather_df.drop('night_weather_cata', axis=1)

    weather_df = weather_df[['shop_id', 'date', 'day_weather_level', 'night_weather_level']]

    return  weather_df



