import os
import datetime
import time
import numpy as np 
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
import lightgbm as lgb
from sklearn import metrics
from sklearn.model_selection import KFold,train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
from config import config_scene
import warnings
warnings.filterwarnings("ignore")
config01 = config_scene()

def model_train_regression(df_train, df_test, feats, label_name, n_fold=5):
    """K-fold LightGBM regression with early stopping.

    Trains one booster per fold on ``df_train[feats]`` against
    ``df_train[label_name]``, collects out-of-fold predictions, and averages
    the per-fold predictions on ``df_test``.

    Args:
        df_train: training DataFrame containing ``feats`` and ``label_name``.
        df_test: DataFrame to predict; must contain ``feats``.
        feats: list of feature column names.
        label_name: name of the target column in ``df_train``.
        n_fold: number of CV folds (default 5).

    Returns:
        (pred_y, oof): averaged test predictions (np.ndarray of len(df_test))
        and out-of-fold train predictions (np.ndarray of len(df_train)).
    """
    # NOTE(review): KFold has no random_state, so fold assignment (and thus
    # oof/pred_y) differs between runs — seed it if reproducibility matters.
    kfold = KFold(n_splits=n_fold, shuffle=True)
    train_label = df_train[label_name]
    oof = np.zeros(len(df_train))
    pred_y = 0
    # Bugfix: the original params also carried 'n_estimators': 1000, which is a
    # LightGBM alias for num_iterations and silently overrode the
    # num_boost_round=5000 passed to lgb.train (effective cap was 1000).
    # The alias is removed and the true cap of 1000 is passed explicitly below.
    params = {'boosting_type': 'gbdt',
              'objective': 'regression',
              'metric': 'rmse',
              'learning_rate': 0.05,
              'num_leaves': 31,
              'max_depth': -1,
              'subsample': 0.7,
              'subsample_freq': 1,
              'colsample_bytree': 0.7,
              'verbose': -1,
              }
    for fold, (train_idx, val_idx) in enumerate(kfold.split(df_train, train_label)):
        print('---------------------------', fold)
        train = lgb.Dataset(df_train.loc[train_idx, feats], df_train.loc[train_idx, label_name])
        val = lgb.Dataset(df_train.loc[val_idx, feats], df_train.loc[val_idx, label_name])
        model = lgb.train(params, train, valid_sets=val, num_boost_round=1000,
                          callbacks=[lgb.early_stopping(100), lgb.log_evaluation(500)])
        # Predict explicitly at the early-stopped best iteration (this is also
        # the default after early stopping, made explicit for clarity).
        oof[val_idx] = model.predict(df_train.loc[val_idx, feats],
                                     num_iteration=model.best_iteration)
        # Average the fold models' test predictions.
        pred_y += model.predict(df_test[feats],
                                num_iteration=model.best_iteration) / n_fold
    return pred_y, oof

def model_train_regression02(df_train, df_test, feats, label_name):
    """Single XGBoost regressor (no CV) for the small holiday-regime splits.

    Fits one model on all of ``df_train`` and predicts both the training set
    (in-sample, returned as ``oof``) and ``df_test``.

    Args:
        df_train: training DataFrame containing ``feats`` and ``label_name``.
        df_test: DataFrame to predict; must contain ``feats``.
        feats: list of feature column names.
        label_name: name of the target column in ``df_train``.

    Returns:
        (pred_y, oof): test predictions and in-sample train predictions.
        Note ``oof`` here is NOT out-of-fold — it is fitted-data prediction.
    """
    # Bugfix: 'reg:linear' is a deprecated alias removed in newer XGBoost
    # releases; 'reg:squarederror' is the identical squared-error objective.
    # NOTE(review): device="cuda" hard-requires a GPU — confirm the target
    # environment, or drop it to fall back to CPU.
    model = XGBRegressor(
        max_depth=6, learning_rate=0.05, n_estimators=2000,
        objective='reg:squarederror', tree_method='hist', device="cuda",
        subsample=0.8, colsample_bytree=0.8, eval_metric='rmse', reg_lambda=0.5)
    model.fit(df_train[feats], df_train[label_name])
    oof = model.predict(df_train[feats])
    pred_y = model.predict(df_test[feats])
    return pred_y, oof

if __name__ == '__main__':
    start_t = time.time()

    # Rolling weekly backtest configuration (from config_scene):
    #   start_day            first train/test split date (YYYYMMDD int)
    #   week_num             number of weekly iterations to roll forward
    #   start_add / end_add  day offsets from the split date bounding the test window
    #   lines_up / lines_down  thresholds on `hinfluence` that split days into a
    #                          "normal" regime (|hinfluence| <= lines_up) and two
    #                          strongly holiday-affected regimes (> lines_up, < lines_down)
    #   holiday_cols         reduced feature subset used for the holiday-regime models
    start_day = config01.start_day
    week_num = config01.week_num
    start_add = config01.start_add
    end_add = config01.end_add
    lines_up = config01.lines_up
    lines_down = config01.lines_down
    holiday_cols = config01.holiday_cols

    # Full history of per-shop daily customer flows. Expected columns include:
    # shop_id, date (YYYYMMDD int), custflows (actual), pcustflows (old model's
    # prediction, used as the baseline), hinfluence (holiday-influence score).
    custflows_orig_all = pd.read_csv('custflows_uni.csv')
    custflows_orig_all.hinfluence = custflows_orig_all.hinfluence.fillna(0)
    # custflows_orig_all = custflows_orig_all.fillna(0)
    # custflows_orig_all = custflows_orig_all[custflows_orig_all.shop_id=='9M8A'] #  9M8K、 9M8I
    # custflows_orig_all = custflows_orig_all.sort_values(by=['date'], ascending=True)
    # custflows_orig_all = custflows_orig_all.reset_index(drop=True)

    custflows_test_lst = []
    start_tuesday = datetime.datetime.strptime(str(start_day), '%Y%m%d')
    # Roll the split date forward one week per iteration: train on everything
    # up to the split date, predict the window [split+start_add, split+end_add].
    for i in range(week_num):
        ########################################################################################################################
        ## Training data
        start_tuesday_int = int(start_tuesday.strftime('%Y%m%d'))
        custflows_train =  custflows_orig_all[custflows_orig_all.date <= start_tuesday_int]
        # custflows_train =  custflows_train[custflows_train.custflows <= 1.5*custflows_train.custflows.mean()]
        # custflows_train =  custflows_train[custflows_train.custflows >= 0.5*custflows_train.custflows.mean()]
        # tmp_lst = [custflows_train] + [custflows_tdf] * 3
        # custflows_train = pd.concat(tmp_lst, ignore_index=True)

        # Regime 01: normal days (|hinfluence| <= lines_up) — all features kept.
        # The *xy copies drop identifier / baseline columns so only model
        # features plus the label remain.
        custflows_train01 =  custflows_train[custflows_train.hinfluence.map(lambda x: abs(x)) <= lines_up]
        custflows_train01 = custflows_train01.reset_index(drop=True)
        custflows_trainxy01 = custflows_train01.copy()
        custflows_trainxy01 = custflows_trainxy01.drop('shop_id',axis=1)
        custflows_trainxy01 = custflows_trainxy01.drop('date',axis=1)
        custflows_trainxy01 = custflows_trainxy01.drop('pcustflows',axis=1)

        # Regime 02: strongly positive holiday influence (> lines_up) —
        # restricted to the holiday_cols feature subset.
        custflows_train02 = custflows_train[custflows_train.hinfluence > lines_up]
        custflows_train02 = custflows_train02[holiday_cols]
        custflows_train02 = custflows_train02.reset_index(drop=True)
        custflows_trainxy02 = custflows_train02.copy()
        custflows_trainxy02 = custflows_trainxy02.drop('shop_id', axis=1)
        custflows_trainxy02 = custflows_trainxy02.drop('date', axis=1)
        custflows_trainxy02 = custflows_trainxy02.drop('pcustflows', axis=1)

        # Regime 03: strongly negative holiday influence (< lines_down) —
        # also restricted to holiday_cols.
        custflows_train03 = custflows_train[custflows_train.hinfluence < lines_down]
        custflows_train03 = custflows_train03[holiday_cols]
        custflows_train03 = custflows_train03.reset_index(drop=True)
        custflows_trainxy03 = custflows_train03.copy()
        custflows_trainxy03 = custflows_trainxy03.drop('shop_id', axis=1)
        custflows_trainxy03 = custflows_trainxy03.drop('date', axis=1)
        custflows_trainxy03 = custflows_trainxy03.drop('pcustflows', axis=1)
        ########################################################################################################################
        ## Test data
        # Test window for this iteration; rows lacking either the actual or the
        # baseline prediction are excluded so MAPE can be computed for both.
        start_i = start_tuesday + datetime.timedelta(days=start_add)
        start_i_int = int(start_i.strftime('%Y%m%d'))
        end_i = start_tuesday + datetime.timedelta(days=end_add)
        end_i_int = int(end_i.strftime('%Y%m%d'))
        custflows_test  =  custflows_orig_all[(start_i_int <= custflows_orig_all.date)&(custflows_orig_all.date <= end_i_int)]
        custflows_test =  custflows_test[custflows_test.custflows.isnull() == False]
        custflows_test =  custflows_test[custflows_test.pcustflows.isnull() == False]

        custflows_test01 =  custflows_test[custflows_test.hinfluence.map(lambda x: abs(x)) <= lines_up]
        y_test01 = custflows_test01['custflows']
        p_test01 = custflows_test01['pcustflows']
        custflows_test01 = custflows_test01.reset_index(drop=True)
        custflows_testxy01 = custflows_test01.copy()
        custflows_testxy01 = custflows_testxy01.drop('shop_id',axis=1)
        custflows_testxy01 = custflows_testxy01.drop('date',axis=1)
        custflows_testxy01 = custflows_testxy01.drop('pcustflows',axis=1)

        # NOTE(review): unlike train02 (and test03), test02 is NOT subset to
        # holiday_cols here. This still works because prediction indexes
        # df_test[feats] with feats taken from trainxy02 — but confirm the
        # asymmetry is intentional.
        custflows_test02 = custflows_test[custflows_test.hinfluence > lines_up]
        y_test02 = custflows_test02['custflows']
        p_test02 = custflows_test02['pcustflows']
        custflows_test02 = custflows_test02.reset_index(drop=True)
        custflows_testxy02 = custflows_test02.copy()
        custflows_testxy02 = custflows_testxy02.drop('shop_id', axis=1)
        custflows_testxy02 = custflows_testxy02.drop('date', axis=1)
        custflows_testxy02 = custflows_testxy02.drop('pcustflows', axis=1)

        # NOTE(review): y/p are extracted AFTER subsetting to holiday_cols, so
        # 'custflows'/'pcustflows' must be members of holiday_cols — verify in
        # config_scene.
        custflows_test03 = custflows_test[custflows_test.hinfluence < lines_down]
        custflows_test03 = custflows_test03[holiday_cols]
        y_test03 = custflows_test03['custflows']
        p_test03 = custflows_test03['pcustflows']
        custflows_test03 = custflows_test03.reset_index(drop=True)
        custflows_testxy03 = custflows_test03.copy()
        custflows_testxy03 = custflows_testxy03.drop('shop_id', axis=1)
        custflows_testxy03 = custflows_testxy03.drop('date', axis=1)
        custflows_testxy03 = custflows_testxy03.drop('pcustflows', axis=1)

        # Train/predict each regime only when it has test rows. Regime 01 uses
        # the K-fold LightGBM; regimes 02/03 (few rows) use the single XGBoost.
        label_name = 'custflows'
        if len(custflows_testxy01) > 0:
            feats = [i for i in custflows_trainxy01.columns]
            feats.remove(label_name)
            pred_y01, oof01 = model_train_regression(custflows_trainxy01, custflows_testxy01, feats, label_name, 5)
            custflows_test01['preds'] = pred_y01
            custflows_test_lst.append(custflows_test01[['shop_id', 'date', 'custflows', 'pcustflows', 'preds']])

        if len(custflows_testxy02) > 0:
            assert len(custflows_trainxy02) > 0
            feats = [i for i in custflows_trainxy02.columns]
            feats.remove(label_name)
            pred_y02, oof02 = model_train_regression02(custflows_trainxy02, custflows_testxy02, feats, label_name)
            custflows_test02['preds'] = pred_y02
            custflows_test_lst.append(custflows_test02[['shop_id', 'date', 'custflows', 'pcustflows', 'preds']])

        if len(custflows_testxy03) > 0:
            assert len(custflows_trainxy03) > 0
            feats = [i for i in custflows_trainxy03.columns]
            feats.remove(label_name)
            pred_y03, oof03 = model_train_regression02(custflows_trainxy03, custflows_testxy03, feats, label_name)
            custflows_test03['preds'] = pred_y03
            custflows_test_lst.append(custflows_test03[['shop_id', 'date', 'custflows', 'pcustflows', 'preds']])

        # Advance the split date by one week for the next iteration.
        start_tuesday = start_tuesday + datetime.timedelta(days=7)

    # Stitch all weekly test windows back together for evaluation.
    custflows_test_df = pd.concat(custflows_test_lst, ignore_index=True)
    custflows_test_df = custflows_test_df.sort_values('date')
    custflows_test_df = custflows_test_df.reset_index(drop=True)

    # Per-shop MAPE of the new model (preds) vs. the old baseline (pcustflows),
    # plus a comparison plot saved to result_test/<shop_id>.png.
    score_lst = []
    for shop_id, group in custflows_test_df.groupby('shop_id'):
        test_score = metrics.mean_absolute_percentage_error(group['custflows'],group['preds'])
        test_score_p = metrics.mean_absolute_percentage_error(group['custflows'],group['pcustflows'])
        print(test_score,test_score_p) # train_score_vals
        score_lst.append([shop_id,test_score,test_score_p]) # train_score_vals

        group = group.sort_values('date')
        plt.plot(group['pcustflows'], color="yellow")
        plt.plot(group['custflows'], color="red")
        plt.plot(group['preds'], color="blue")
        plt.legend(['Old Predicted custflows', 'Actual custflows', 'Predicted custflows'])
        plt.title(shop_id)
        plt.savefig('result_test/{}.png'.format(shop_id))  # plt.show()
        plt.close()

    # Summary: err = new MAPE - old MAPE per shop (negative means the new
    # model improved on the baseline). Messages report counts and mean deltas.
    score_arr = np.array([dd[1:] for dd in score_lst])
    print(score_arr.mean(axis=0))
    scored_df = pd.DataFrame(score_lst,columns=['shop_id','score_test','score_old',]) # score_val
    scored_df['err'] = scored_df.score_test - scored_df.score_old
    scored_df = scored_df.sort_values('err')
    scored_df01 = scored_df[scored_df.err <= 0]
    scored_df02 = scored_df[scored_df.err > 0]
    print("共{}家门店效果更好，平均mape优于原模型{}个百分点".format(len(scored_df01),round(abs(scored_df01.err.mean()),3)*100))
    print("共{}家门店效果更差，平均mape差于原模型{}个百分点".format(len(scored_df02),round(scored_df02.err.mean(),3)*100))
    scored_df.to_csv('scored_df.csv',index=False)

    # Persist the new model's predictions for downstream use.
    custflows_result = custflows_test_df[['shop_id','date','preds']]
    custflows_result.to_csv('custflows_result.csv',index=False)


    print(time.time()-start_t,'finish!!!')




