import time
import datetime
import numpy as np 
import pandas as pd
from multiprocessing import Pool
from model import *
from config import config_scene
import warnings
# NOTE(review): a blanket "ignore" also hides pandas SettingWithCopyWarning
# and deprecation warnings — consider narrowing to specific categories.
warnings.filterwarnings("ignore")
# Scenario configuration shared below; the code reads start_day, start_add
# and end_add from it (see task_halfhours).
config01 = config_scene()


def task_halfhours():
    """Predict half-hour customer flow per shop and write the result CSV.

    Pipeline:
      1. Read half-hour history ('custflow.csv') and upstream daily-total
         predictions ('custflows_result.csv').
      2. Per shop, train a 10-fold LightGBM regressor on rows up to
         ``config01.start_day`` and predict the window
         [start_day + start_add, start_day + end_add].
      3. Within each (shop_id, date), rescale the half-hour predictions to
         shares of the day and multiply by the predicted daily total.
      4. Write 'custflow_result_fina.csv' with ISO-formatted dates.

    Side effects: reads two CSVs from the working directory, writes one,
    and prints the elapsed time. No return value.
    """
    start_t = time.time()

    # Half-hour level history; missing holiday-influence flags default to 0.
    custflow_orig_all = pd.read_csv('custflow.csv')
    custflow_orig_all.hinfluence = custflow_orig_all.hinfluence.fillna(0)

    # Daily totals predicted upstream. Align column names and convert the
    # date from 'YYYY-MM-DD' strings to YYYYMMDD ints to match the history.
    custflows_result = pd.read_csv('custflows_result.csv')
    custflows_result = custflows_result.rename(
        columns={'date_id': 'date', 'custflows': 'preds'})
    custflows_result['date'] = custflows_result.date.map(
        lambda x: int(x.replace('-', '')))

    # Identifier/target columns that must never be fed to the model.
    non_feature_cols = ['shop_id', 'date', 'start_time',
                        'custflows', 'custflows_tr']
    label_name = 'custflow'

    fina_result_lst = []
    for shop_id, custflow_orig in custflow_orig_all.groupby('shop_id'):
        custflows_shop_result = custflows_result[
            custflows_result.shop_id == shop_id]

        start_day = config01.start_day
        start_add = config01.start_add
        end_add = config01.end_add

        custflow_result01 = pd.DataFrame()
        start_tuesday = datetime.datetime.strptime(str(start_day), '%Y%m%d')
        # Single window for now; the loop shape (and the +7-day advance at
        # the bottom) supports rolling weekly retraining if range(1) grows.
        for _ in range(1):
            start_tuesday_int = int(start_tuesday.strftime('%Y%m%d'))

            # Training data: all rows up to (and including) the anchor day.
            custflow_train = custflow_orig[
                custflow_orig.date <= start_tuesday_int]
            custflow_trainxy = custflow_train.drop(columns=non_feature_cols)

            # Test window: [anchor + start_add, anchor + end_add], keeping
            # only rows whose half-hour target is known.
            start_i_int = int((start_tuesday
                               + datetime.timedelta(days=start_add)).strftime('%Y%m%d'))
            end_i_int = int((start_tuesday
                             + datetime.timedelta(days=end_add)).strftime('%Y%m%d'))
            custflow_test = custflow_orig[
                (start_i_int <= custflow_orig.date)
                & (custflow_orig.date <= end_i_int)]
            custflow_test = custflow_test[custflow_test.custflow.notnull()]
            custflow_test = custflow_test.reset_index(drop=True)
            custflow_testX = custflow_test.drop(columns=non_feature_cols)

            # Feature list = every remaining column except the label.
            feats = [c for c in custflow_trainxy.columns if c != label_name]
            custflow_trainxy = custflow_trainxy.reset_index(drop=True)
            custflow_testX = custflow_testX.reset_index(drop=True)
            # 10-fold LightGBM regression; out-of-fold preds are unused here.
            pred_y, _oof = lgb_foldtrain_regression(
                custflow_trainxy, custflow_testX, feats, label_name, 10)

            custflow_test['pred'] = pred_y
            custflow_test['pred'] = custflow_test['pred'].clip(lower=0)
            # Attach the predicted daily total ('preds') for rescaling.
            custflow_test = pd.merge(custflow_test, custflows_shop_result,
                                     how="left", on=['shop_id', 'date'])
            custflow_test = custflow_test[
                ['shop_id', 'date', 'start_time', 'pred', 'preds', 'sdt']]

            # Per (shop, day): convert half-hour preds to shares of the day,
            # then scale by the day's predicted total, floored at 0.
            custflows_test_new = []
            for _, group in custflow_test.groupby(['shop_id', 'date']):
                group = group.copy()  # do not mutate the groupby view
                group['pred'] = group['pred'] / group['pred'].sum()
                group['pred'] = (group['pred'] * group['preds']).clip(lower=0)
                custflows_test_new.append(group)
            custflows_test_new_df = pd.concat(custflows_test_new,
                                              ignore_index=True)
            custflow_result01 = pd.concat(
                [custflow_result01, custflows_test_new_df], ignore_index=True)

            # Advance the anchor one week for the (currently single-pass)
            # rolling loop above.
            start_tuesday = start_tuesday + datetime.timedelta(days=7)

        fina_result_lst.append(custflow_result01)

    # Final output: restore the upstream schema ('date_id', 'custflow') and
    # ISO-format the dates before writing.
    custflow_result_fina = pd.concat(fina_result_lst, ignore_index=True)
    custflow_result_fina = custflow_result_fina[
        ['shop_id', 'date', 'start_time', 'pred', 'sdt']]
    custflow_result_fina = custflow_result_fina.rename(
        columns={'date': 'date_id', 'pred': 'custflow'})
    custflow_result_fina = custflow_result_fina.astype(str)
    custflow_result_fina.date_id = custflow_result_fina.date_id.map(
        lambda x: datetime.datetime.strptime(x, '%Y%m%d').strftime('%Y-%m-%d'))
    custflow_result_fina.to_csv('custflow_result_fina.csv', index=False)

    print('model_halfhours time:', time.time() - start_t)



if __name__ == '__main__':
    # Script entry point: run the half-hour prediction task end to end.
    task_halfhours()





