# -*- coding: utf-8 -*-

import pickle
import os
import sys
import pandas as pd
import numpy as np
import lightgbm as lgb
import scipy.stats as st
from copy import deepcopy
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import mean_absolute_error

join = os.path.join
dirname = os.path.dirname
CURRENT_PATH = dirname(os.path.realpath(__file__))
sys.path.append(CURRENT_PATH)
from utils import error_calc

'''
模型训练脚本

input: csv 数据清洗完毕，字段衍生完毕可以用于训练模型的数据集
output: pkl 模型文件
output: metric 模型结果指标
'''




### Random seed for reproducibility.
# BUGFIX: the original wrote `seed = np.random.seed(2021)`, which binds
# `seed` to None because np.random.seed() returns None. Keep the name
# `seed` but store the actual seed value.
seed = 2021
np.random.seed(seed)

# Target column the model predicts (per-trip unit mileage).
target = 'unit_mileage'

# Trip-level state features known at the start of a trip.
initial_state_feature = ['vin', 'start_soc', 'distance', 'start_longitude', 'start_latitude','end_longitude',
                        'end_latitude', 'start_leijilicheng', 'report_month', 'week_day', 'hour']

# Features from recent preceding trip segments; lagged copies
# (last_1_* .. last_10_*) are generated from these names below.
shift_feature_list = ['start_soc', 'end_soc', 'start_longitude', 'start_latitude', 'end_longitude', 'end_latitude',
                      'start_leijilicheng', 'end_leijilicheng', 'total_distance_time', 'proportion_parking_time',
                      'braking_frequency','power_delta', 'speed', 'unit_mileage','average_driving_speed', 'average_acceleration', 'max_acceleration',
                      'acceleration_ratio', 'deceleration_ratio', 'uniform_ratio', 'idle_speed_ratio','squares_velocity_sum','positive_acceleration_average','negative_acceleration_average',
                      'jiasutabanxingcheng_maximum', 'jiasutabanxingcheng_minimum', 'jiasutabanxingcheng_meanvalue',
                      'jiasutabanxingcheng_variance', 'jiasutabanxingcheng_frequency', 'distance', 'cheshu_maximum',
                      'cheshu_minimumm', 'cheshu_mean_value', 'cheshu_mean_variance', 'cheshu_mean_median',
                      'dianJiZhuanQu_maximum','dianJiZhuanQu_minimumm', 'dianJiZhuanQu_mean_value', 'dianJiZhuanQu_variance',
                      'dianJiZhuanQu_median', 'zongdianya_maximum','zongdianya_minimumm', 'zongdianya_mean_value', 'zongdianya_variance', 'zongdianya_median',
                      'zongdianliu_maximum','zongdianliu_minimumm', 'zongdianliu_mean_value', 'zongdianliu_variance', 'zongdianliu_median',
                      'zuigaowenduzhi_maximum', 'zuigaowenduzhi_minimumm', 'zuigaowenduzhi_mean_value',
                      'zuigaowenduzhi_variance', 'zuigaowenduzhi_median', 'jueyuanzhuzhi_maximum',
                      'jueyuanzhuzhi_minimumm', 'jueyuanzhuzhi_mean_value', 'jueyuanzhuzhi_variance',
                      'jueyuanzhuzhi_median']

# Lag feature names: last_1_<f> .. last_10_<f> for every shifted feature,
# grouped by feature (same order as the original append loop).
lag_features = [
    'last_' + str(k) + '_' + f
    for f in shift_feature_list
    for k in range(1, 11)
]

# Full initial feature set, lower-cased to match the dataframe's column names.
origin_features = [name.lower() for name in initial_state_feature + lag_features]

### Categorical variables handed to LightGBM.
categorical_features = ['vin','report_month','week_day','hour']

if __name__ == '__main__':

    # TODO(review): the input path is an empty placeholder — fill in the path
    # of the cleaned, feature-engineered dataset before running.
    # BUGFIX: header=0 (was header=None): columns are accessed by name below
    # ('vin', 'unit_mileage', ...); with header=None the columns would be
    # integers and every named lookup would raise KeyError.
    working_condition_data_df = pd.read_csv(r'', sep='\t', header=0)

    # Integer-encode the vehicle id once; the encoder is kept so the original
    # vins can be restored in the result dataframe at the end.
    # (The original fit the encoder twice and kept an unused `vin_lst`.)
    le = LabelEncoder()
    working_condition_data_df['vin'] = le.fit_transform(working_condition_data_df['vin'])

    ####################################################################################
    # 80/20 train/test split, then carve a validation set (30% of the held-out
    # portion) out of the test data for the eval_set during fitting.
    X_train, X_test, y_train, y_test = train_test_split(
        working_condition_data_df.drop(columns=['unit_mileage']),
        working_condition_data_df['unit_mileage'],
        test_size=0.2, random_state=0)
    X_test, X_valid, y_test, y_valid = train_test_split(
        X_test, y_test, test_size=0.3, random_state=0)

    # Hyper-parameter search space for LightGBM (sampled by RandomizedSearchCV).
    param_dict = {
            'num_leaves': st.randint(low=20, high=600),
            'max_depth': st.randint(low=5, high=30),
            'min_data_in_leaf': st.randint(low=20, high=121),
            'min_sum_hessian_in_leaf': st.uniform(loc=0, scale=1),
            'bagging_fraction': st.uniform(loc=0.5, scale=0.5),
            'feature_fraction': st.uniform(loc=0.5, scale=0.5),
            'bagging_freq': st.randint(low=0, high=100),
            'feature_fraction_bynode': st.uniform(loc=0, scale=1),
            'lambda_l1': st.uniform(loc=0, scale=1),
            'lambda_l2': st.uniform(loc=0, scale=1),
            'min_gain_to_split': st.uniform(loc=0, scale=1),
            'drop_rate': st.uniform(loc=0, scale=1),
            'max_bin': st.randint(low=5, high=550),
            'min_data_in_bin': st.randint(low=3, high=20),
        }

    reg = RandomizedSearchCV(estimator=lgb.LGBMRegressor(boosting_type='gbdt', num_threads=os.cpu_count(),
                                                         n_estimators=1500, learning_rate=0.01),
                             param_distributions=param_dict,
                             scoring='neg_mean_squared_error',
                             cv=5, random_state=2020, n_jobs=-1)

    search = reg.fit(X_train[origin_features], y_train,
                     eval_set=[(X_valid[origin_features], y_valid)],
                     categorical_feature=categorical_features,
                     verbose=100, eval_metric='mape')

    print("The Best Params is:")
    print(search.best_params_)

    ### Prediction output on the held-out test set.
    # NOTE(review): the module docstring promises a pkl model file as output,
    # but nothing is pickled here — confirm whether saving was intended.
    result_df = deepcopy(X_test)
    result_df['unit_mileage'] = y_test
    result_df['predict_unit_mileage'] = search.predict(X_test[origin_features])
    result_df['error'] = result_df.apply(lambda x: error_calc(x['unit_mileage'], x['predict_unit_mileage']), axis=1)
    # Restore the original vin strings for the report.
    result_df['vin'] = le.inverse_transform(result_df['vin'])
    print(result_df['error'].mean())





