# import datetime
# import json
# import os
# import pickle
# import time
#
# import numpy as np
# import pandas as pd
# import math
# import requests
# from sklearn.model_selection import train_test_split
# from sklearn.preprocessing import OneHotEncoder, StandardScaler
# from sklearn.linear_model import LinearRegression
# from src.utils.model_utils import getMape,custom_mean_absolute_percentage
#
# from src.mobile.anomaly.anomaly_detect import MobileAnomalyDetector
# from src.mobile.anomaly.anomaly_load import load_anomaly_document_item
# from src.mobile.model import mobile_model_config
# from src.mobile.model.mobile_model_config import ABT_RY_sku_list
# from src.mobile.preprocessor.settle_data_loader import preprocess_settle_data
# from src.utils.config import config, logger
# from src.utils.db_processor import mysql_prediction_processor, postgre_processor,mysql_price_model, mysql_processor
# from src.utils.dingding_message import dingding_messager
# from src.utils.redis_pool import redis_conn
# from src.utils.util import check_conflict_file, get_today, format_date_string
# from src.utils.model_utils import getMape
#
# from src.mobile.job.sku_price_complement import MOBILE_DATA_SQL,PRODUCT_SQL,COMPLETE_DATA_SQL,LEVEL_SQL,\
#     LEVEL_TEMPLATE_MAPPING_SQL,FEATURES,BRAND_ID_NAME_SQL
# from src.mobile.predict.mobile_price_config import MODEL_DIR, MAX_EVN_PRICE, \
#     HISTORY_AVG_PRICE_WEIGHT, MODEL_PRICE_WEIGHT
#
# import tensorflow as tf
# from tensorflow import keras
# # from tensorflow.keras import layers
# # from tensorflow.keras.layers import Dense, Embedding, Concatenate, Dropout, Input, Layer
#
# from tensorflow.python.keras.backend import set_session
# # from tensorflow.python.ops import math_ops
# # from tensorflow.python.keras import backend as K
# # from src.utils.model_utils import weight_function
# from tensorflow.python.keras import regularizers
#
# from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
# import os
# import sys
#
#
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# session_config = tf.ConfigProto(allow_soft_placement=True)
# session_config.gpu_options.per_process_gpu_memory_fraction = 0.9  # 占用GPU90%的显存
# session_config.gpu_options.allow_growth = True
# sess = tf.Session(config=session_config)
# global graph
# graph = tf.get_default_graph()
#
# log_filepath = 'logs/keras_log'
# iters = 0
# inf=sys.float_info.min
# MIN_VALUE=5
#
#
# def load_history_avg_price(key):
#     """
# #     读取均价
#     :param key: key
#     :return:
#     """
#     price = redis_conn.get(key)
#     if price is None:
#         return price
#     return int(float(price))
#
# class MobileModel:
#     def __init__(self, model_date=None):
#         """
#
#         :param model_date: 建模日期
#         """
#         if model_date is None:
#             self.model_date = get_today()
#         else:
#             self.model_date = model_date
#
#         self.model_data = None
#         self.model = None
#         self.train_history = None
#         # 特征变量One-Hot编码处理器
#         self.ohe = OneHotEncoder(handle_unknown='ignore')
#         # 增加一个产品的One-Hot，方便在预测时判断是否是新型号
#         self.product_ohe = None
#         self.mysql_price_num=0
#         # 价格标准化
#         self.scaler = StandardScaler()
#         # api缓存清除url
#         self.ab_price_api_url = config.get_config(
#             'ab_price_api', 'cache_clear_url')
#         self._OLS_EXCEPTION_FLAG_ = False
#         self.SKU_ID_CHECK_LIST = []
#
#     def load_model_data(self):
#         """
#         读取建模数据
#         :return:
#         """
#         self.model_data, self.product_ohe = preprocess_settle_data()
#         logger.info('model_data shape@{}'.format(self.model_data.shape))
#
#     def eliminate_anomaly(self):
#         """
#         排除异常数据
#         :return:
#         """
#         logger.info('eliminating anomaly...')
#         anomaly_document_item = load_anomaly_document_item()
#         if not anomaly_document_item.empty:
#             logger.info('eliminating anomaly products size@{}'.format(
#                 anomaly_document_item.shape))
#             self.model_data = self.model_data[~self.model_data['document_item_id'].isin(
#                 anomaly_document_item['document_item_id'])]
#
#     def detect_anomaly(self):
#         """
#         检测异常数据
#         - 以昨日最新的模型对物品进行预测，预测价和最新的出货价进行对比
#         :return:
#         """
#         history_date = self.model_date - datetime.timedelta(days=1)
#         anomaly_detector = MobileAnomalyDetector(self.model_data, history_date)
#         anomaly_detector.launch_anomaly_detection()
#
#     def weight_function(self, s, relative_date=None, max_period=42):
#         """
#         计算周期，30天为一周期
#         :param s: 日期序列
#         :param relative_date: 当前日期，默认为当天日期
#         :param max_period: 最大周期数
#         :param interval: 周期间隔
#         :return:
#         """
#         if relative_date is None:
#             relative_date = get_today()
#         diff_days = relative_date - s
#         diff_days = diff_days.days
#         period = max_period - diff_days
#         return math.log(min(max(period, 2),42), 15)
#
#     # 数据乱序
#     def shuffle(self):
#         X = self.model_data
#         ind = np.arange(X.shape[0])
#         for i in range(7):
#             np.random.shuffle(ind)
#         return X.loc[ind]
#
#     # def build_model(self, input_shape):
#     #     """
#     #     构建模型
#     #     :param input_shape: 输入特征数量
#     #     :return:
#     #     """
#     #     inputs = keras.Input(shape=(input_shape,))
#     #
#     #     dense = keras.layers.Dense(128, activation='relu')(inputs)
#     #     dense = keras.layers.Dense(64, activation='relu')(dense)
#     #     dense = keras.layers.Dense(32, activation='relu')(dense)
#     #     outputs = keras.layers.Dense(1, activation='linear')(dense)
#     #
#     #     self.model = keras.Model(inputs=inputs, outputs=outputs)
#     #
#     #     opt = keras.optimizers.Adam()
#     #     self.model.compile(optimizer=opt, loss='mse')
#
#     def wd_build_model(self, input_shape, params):
#         """
#         构建模型
#         :param input_shape: 输入特征数量
#         :return:
#         """
#
#         inputs=keras.layers.Input(shape=input_shape)
#         hidden1 = keras.layers.Dense(params['units1'],activation=params['activation'],
#                                      kernel_regularizer=regularizers.l2(params['lr1']),
#                                       kernel_initializer='normal')(inputs)
#         hidden2= keras.layers.Dense(params['units2'], activation=params['activation2'],
#                                           kernel_regularizer=regularizers.l2(params['lr2'])
#                                           # ,activity_regularizer = regularizers.l1(0.005)
#                                           , kernel_initializer='normal'
#                                           )(hidden1)
#         hidden3= keras.layers.Dense(params['units3'], activation=params['activation3'],
#                                           kernel_regularizer=regularizers.l2(params['lr3'])
#                                           , kernel_initializer='normal'
#                                           )(hidden2)
#         concat = keras.layers.concatenate([inputs,hidden3])
#         outputs = keras.layers.Dense(1,
#                                     kernel_regularizer=regularizers.l1_l2(l1=params['lr4'],
#                                                                              l2=params['lr5']),
#                                     activation='linear')(concat)
#         self.model = keras.Model(inputs=inputs, outputs=outputs)
#
#         # self.model.summary()
#         self.model.compile(loss='mse',
#                            optimizer=keras.optimizers.Adam()
#                            , metrics=['mse',custom_mean_absolute_percentage])
#
#         logger.info('模型初始化完成！')
#
#     def build_model(self, input_shape, params):
#         """
#         构建模型
#         :param input_shape: 输入特征数量
#         :return:
#         """
#         self.model = keras.Sequential()
#
#         self.model.add(keras.layers.Dense(params['units1'], activation=params['activation'],
#                                           kernel_regularizer=regularizers.l2(params['lr1']),
#                                           kernel_initializer='normal',
#                                           # activity_regularizer=regularizers.l1(0.005),
#                                           input_shape=(input_shape,)))
#         # self.model.add(keras.layers.Dropout(0.4))
#         self.model.add(keras.layers.Dense(params['units2'], activation=params['activation'],
#                                           kernel_regularizer=regularizers.l2(params['lr2'])
#                                           # ,activity_regularizer = regularizers.l1(0.005)
#                                           , kernel_initializer='normal'
#                                           ))
#         # self.model.add(keras.layers.Dropout(0.3))
#         self.model.add(keras.layers.Dense(params['units3'], activation=params['activation'],
#                                           kernel_regularizer=regularizers.l2(params['lr3'])
#                                           # ,activity_regularizer=regularizers.l1(0.003)
#                                           , kernel_initializer='normal'
#                                           ))
#         # self.model.add(keras.layers.Dropout(0.2))
#         self.model.add(
#             keras.layers.Dense(1, kernel_regularizer=regularizers.l1_l2(l1=params['lr4'], l2=params['lr5']), activation='linear'))
#         # self.model.summary()
#         self.model.compile(loss=self.custom_mean_absolute_percentage,
#                            optimizer=keras.optimizers.Adam()
#                            , metrics=[self.custom_mean_absolute_percentage,'mse']
#                            )
#         logger.info('模型初始化完成！')
#
#
#     # def train_model(self):
#     #     """
#     #     训练模型
#     #     :return:
#     #     """
#     #     if self.model_data is None:
#     #         logger.critical('model_data is None!')
#     #         return
#     #
#     #     # 近期数据
#     #     recent_date = self.model_date - \
#     #         datetime.timedelta(days=mobile_model_config.RECENT_DAYS)
#     #
#     #     self.model_data = self.shuffle()
#     #     self.model_data['weight'] = self.model_data['settle_list_create_date'].apply(self.weight_function)
#     #
#     #     recent_data = self.model_data[self.model_data['settle_list_create_date'] >= recent_date]
#     #     self.model_data=self.model_data[self.model_data['settle_list_create_date'] < recent_date]
#     #     # x_train, x_valid, y_train, y_valid = train_test_split(self.model_data.drop(columns='item_quotation_price_num'),
#     #     #                                                       self.model_data['item_quotation_price_num'],
#     #     #                                                       test_size=0.2)
#     #     x_train = self.model_data.drop(columns='item_quotation_price_num')
#     #
#     #     train_x_inputs = self.ohe.fit_transform(
#     #         x_train[mobile_model_config.MOBILE_FEATURES])
#     #     # valid_x_inputs = self.ohe.transform(
#     #     #     x_valid[mobile_model_config.MOBILE_FEATURES])
#     #     recent_x_inputs = self.ohe.transform(
#     #
#     #         recent_data[mobile_model_config.MOBILE_FEATURES])
#     #     train_y_scaled = self.model_data['item_quotation_price_num']
#     #     # train_y_scaled = self.scaler.fit_transform(
#     #     #     y_train.values.reshape((-1, 1))).flatten()
#     #     # valid_y_scaled = self.scaler.transform(
#     #     #     y_valid.values.reshape((-1, 1))).flatten()
#     #     recent_y_scaled=recent_data['item_quotation_price_num']
#     #     # recent_y_scaled = self.scaler.transform(
#     #     #     recent_data['item_quotation_price_num'].values.reshape((-1, 1))).flatten()
#     #
#     #     logger.info('training model...')
#     #     self.build_model(train_x_inputs.shape[1])
#     #     self.train_history = self.model.fit(train_x_inputs, train_y_scaled,
#     #                                         epochs=100, verbose=1, batch_size=512,
#     #                                         callbacks=mobile_model_config.model_callbacks,
#     #                                         sample_weight=np.array(x_train['weight']))
#     #     # 加权训练近期的数据
#     #     # self.model.fit(recent_x_inputs, recent_y_scaled,
#     #     #                epochs=5, verbose=0, batch_size=512)
#     #     self.model_mse=self.model
#     #     #self.product_ohe.fit(x_train[['product_name']])
#     #     self.score = self.model.evaluate(recent_x_inputs, recent_y_scaled, verbose=0,
#     #                                 sample_weight=np.array(recent_data['weight']))
#     #     test_mape = -1
#     #     test_mape_mse = -1
#     #     try:
#     #
#     #         recent_predict_data=self.model.predict(recent_x_inputs)
#     #         recent_predict_data_mse=self.model_mse.predict(recent_x_inputs)
#     #         test_mape= getMape(recent_predict_data,recent_y_scaled)
#     #         test_mape_mse=getMape(recent_predict_data_mse,recent_y_scaled)
#     #
#     #         logger.info('history mape_model@{},score@{} '.format( test_mape,self.score))
#     #
#     #
#     #     except Exception as e:
#     #         logger.info('test_mape is exception!')
#
#
#     def extract_training_data(self):
#         logger.info("extract_training_data is begin !")
#         if self.model_data is None:
#             logger.critical('model_data is None!')
#             return
#
#         # 数据乱序处理
#         self.model_data = self.shuffle()
#         self.model_data['weight'] = self.model_data['settle_list_create_date'].apply(self.weight_function)
#         logger.info('extract_training_data!')
#
#         # 近期数据
#         recent_date = self.model_date - \
#                       datetime.timedelta(days=mobile_model_config.RECENT_DAYS)
#
#         x_test = self.model_data[self.model_data['settle_list_create_date'] >= recent_date]
#         y_test = x_test['item_quotation_price_num']
#
#         test_x_inputs = self.ohe.fit_transform(
#             x_test[mobile_model_config.MOBILE_FEATURES])
#         # test_y_scaled = self.scaler.fit_transform(
#         #     y_test.values.reshape((-1, 1))).flatten()
#
#         self.test_x_inputs = test_x_inputs
#         self.test_y_scaled = y_test
#         self.weight_test_y_input = x_test['weight']
#
#         # logger.info('方差为：' + str(self.scaler.var_[0]))
#
#     def train_model(self, params):
#         """
#         训练模型
#         :return:
#         """
#
#         os.system("echo 1 > /proc/sys/vm/drop_caches")
#
#         logger.info('training model...')
#         self.wd_build_model(self.test_x_inputs.shape[1], params)
#
#         tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath,  # log 目录
#                                             histogram_freq=0,  # 按照何等频率（epoch）来计算直方图，0为不计算
#                                             #                  batch_size=32,     # 用多大量的数据计算直方图
#                                             write_graph=True,  # 是否存储网络结构图
#                                             write_grads=True,  # 是否可视化梯度直方图
#                                             write_images=True,  # 是否可视化参数
#                                             embeddings_freq=0,
#                                             embeddings_layer_names=None,
#                                             embeddings_metadata=None)
#
#         # 设置log的存储位置，将网络权值以图片格式保持在tensorboard中显示，设置每一个周期计算一次网络的
#
#         x_train, x_valid, y_train, y_valid = train_test_split(self.model_data,
#                                                               self.model_data['item_quotation_price_num'],
#                                                               test_size=0.2)
#         train_x_inputs = self.ohe.transform(
#             x_train[mobile_model_config.MOBILE_FEATURES])
#
#         valid_x_inputs = self.ohe.transform(
#             x_valid[mobile_model_config.MOBILE_FEATURES])
#
#         # train_y_scaled = self.scaler.transform(
#         #     y_train.values.reshape((-1, 1))).flatten()
#         train_y_scaled=y_train
#
#         # valid_y_scaled = self.scaler.transform(
#         #     y_valid.values.reshape((-1, 1))).flatten()
#         valid_y_scaled=y_valid
#         weight_train_y_input = x_train['weight']
#         weight_valid_y_input = x_valid['weight']
#
#         # 权值，每层输出值的分布直方图
#         cbks = [tb_cb,mobile_model_config.early_stop]
#         self.train_history = self.model.fit(train_x_inputs, train_y_scaled,
#                                             validation_data=(
#                                                 valid_x_inputs, valid_y_scaled, weight_valid_y_input),
#                                             epochs=50, verbose=1, batch_size=params['units4'],
#                                             callbacks=cbks, sample_weight=np.array(weight_train_y_input))
#
#         score = self.model.evaluate(self.test_x_inputs, self.test_y_scaled, verbose=0,
#                                     sample_weight=np.array(self.weight_test_y_input))
#
#
#         test_mape = -1
#         try:
#             recent_predict_data=self.model.predict(self.test_x_inputs)
#             recent_predict_data=[x[0] for x in recent_predict_data]
#             test_mape=np.mean(100.*np.abs(
#                 (np.clip(recent_predict_data,5,None)-np.clip(self.test_y_scaled,5,None))
#                                           /
#                                           np.clip(self.test_y_scaled,5,None)
#             )
#                               )
#
#
#         except Exception as e:
#             logger.info('test_mape is exception!')
#
#         import sys
#         sys.stdout.flush()
#         global iters
#         iters += 1
#         logger.info(str(self.train_history.history))
#         logger.info('iters= {},score={},train_loss={},val_loss={},test_mape={},params={},'.format(iters, str(score),
#                                                                                      self.train_history.history[
#                                                                                          'custom_mean_absolute_percentage'][
#                                                                                          -1],
#                                                                                      self.train_history.history[
#                                                                                          'val_custom_mean_absolute_percentage'][
#                                                                                          -1],
#                                                                                     str(test_mape),
#                                                                                      str(params)))
#         return {'accuracy': score[1], 'status': STATUS_OK, 'loss': score[1]}
#
#     def hyper_train_model(self):
#         space = {
#             # 'window': hp.choice('window',[30, 60, 120, 180]),
#             # 'units1': hp.choice('units11', np.arange(64, 2049, 64)),
#             #v1
#             # 'units1': hp.choice('units11', [128, 256, 512, 1024, 1280]),
#             # 'units2': hp.choice('units22', [256,512, 1024, 1280, 1536, 1792, 2048]),
#             # 'units3': hp.choice('units33', [16,32,64, 128, 256,512]),
#             # 'units4': hp.choice('units34', [512, 1024]),
#             #
#             # # 'lr': hp.choice('lr',[0.01, 0.001, 0.0001]),
#             # 'lr1': hp.choice('lr1', np.arange(0.00001, 0.045, 0.0002)),
#             # 'lr2': hp.choice('lr2', np.arange(0.0001, 0.04, 0.0005)),
#             # 'lr3': hp.choice('lr3', np.arange(0.0001, 0.02, 0.0005)),
#             # 'lr4': hp.choice('lr4', np.arange(0.01, 0.45, 0.01)),
#             # 'lr5': hp.choice('lr5', np.arange(0.01, 0.4, 0.01)),
#
#             #v2
#             # 'units1': hp.choice('units11', [512, 1024, 1280]),
#             # 'units2': hp.choice('units22', [256, 512, 1024, 1280, 1536, 1792, 2048]),
#             # 'units3': hp.choice('units33', [16, 32, 64, 128, 256]),
#             # 'units4': hp.choice('units34', [512, 1024, 2048]),
#             #
#             # # 'lr': hp.choice('lr',[0.01, 0.001, 0.0001]),
#             # 'lr1': hp.choice('lr1', np.arange(0.00001, 0.016, 0.00002)),
#             # 'lr2': hp.choice('lr2', np.arange(0.0001, 0.017, 0.00002)),
#             # 'lr3': hp.choice('lr3', np.arange(0.0001, 0.018, 0.00005)),
#             # 'lr4': hp.choice('lr4', np.arange(0.01, 0.15, 0.001)),
#             # 'lr5': hp.choice('lr5', np.arange(0.01, 0.1, 0.001)),
#
#             # v3
#             # 'units1': hp.choice('units11', [512, 1024, 1280]),
#             # 'units2': hp.choice('units22', [256, 512, 1024, 1280, 2048]),
#             # 'units3': hp.choice('units33', [16, 32, 64, 128]),
#             # 'units4': hp.choice('units34', [512, 1024, 2048]),
#             #
#
#             # 'lr1': hp.choice('lr1', np.arange(0.000001, 0.001, 0.000001)),
#             # 'lr2': hp.choice('lr2', np.arange(0.00001, 0.01, 0.00002)),
#             # 'lr3': hp.choice('lr3', np.arange(0.0001, 0.015, 0.00005)),
#             # 'lr4': hp.choice('lr4', np.arange(0.01, 0.15, 0.001)),
#             # 'lr5': hp.choice('lr5', np.arange(0.01, 0.1, 0.001)),
#
#             # v4
#             'units1': hp.choice('units11', [512, 1024, 1280]),
#             'units2': hp.choice('units22', [256, 512, 1024, 1280, 2048]),
#             'units3': hp.choice('units33', [16, 32, 64, 128]),
#             'units4': hp.choice('units34', [512, 1024, 2048]),
#
#             # 'lr': hp.choice('lr',[0.01, 0.001, 0.0001]),
#             'lr1': hp.choice('lr1', np.arange(0.00001, 0.02, 0.00001)),
#             'lr2': hp.choice('lr2', np.arange(0.0001, 0.04, 0.0001)),
#             'lr3': hp.choice('lr3', np.arange(0.0001, 0.018, 0.0001)),
#             'lr4': hp.choice('lr4', np.arange(0.001, 0.3, 0.001)),
#             'lr5': hp.choice('lr5', np.arange(0.001, 0.4, 0.001)),
#             'activation': hp.choice('activations', ['relu', tf.nn.leaky_relu]),
#             'activation2': hp.choice('activations2', ['relu', tf.nn.leaky_relu]),
#             'activation3': hp.choice('activations3',['relu', tf.nn.leaky_relu])
#             # ,
#             # 'loss': hp.choice('lossss', [keras.losses.categorical_crossentropy,
#             #                              # keras.losses.mse,
#             #                              # keras.losses.mae,
#             #                              keras.losses.binary_crossentropy])
#         }
#         trials = Trials()
#         best = fmin(self.train_model, space, algo=tpe.suggest, max_evals=100, trials=trials)
#         logger.info('best {}', best)
#
#     def evaluate_model(self):
#         """
#         评估模型
#         :return:
#         """
#         if self.train_history is None:
#             logger.critical('train_history is None!')
#             return
#
#         epoch = self.train_history.epoch[-1]
#         # 乘以方差来计算实际的loss
#         train_loss = np.round(
#             np.sqrt(self.train_history.history['loss'][-1] * self.scaler.var_[0]), 4)
#         val_loss = np.round(
#             np.sqrt(self.train_history.history['val_loss'][-1] * self.scaler.var_[0]), 4)
#         data_size = self.train_history.params['samples']
#
#         logger.info('saving model evaluation...')
#         evaluation_sql = """
#         REPLACE INTO mobile_price_model_evaluation(model_date, epoch, train_loss, val_loss, data_size)
#         VALUES(%s, %s, %s, %s, %s)
#         """
#         evaluation_data = [(format_date_string(self.model_date), epoch, float(
#             train_loss), float(val_loss), data_size)]
#         mysql_prediction_processor.execute_insert_sql(
#             evaluation_sql, evaluation_data)
#
#     def load_models(self):
#         """
#         读取模型和预处理器
#         :return:
#         """
#         logger.info('loading models...')
#
#         model_file = os.path.join(MODEL_DIR, mobile_model_config.MODEL_FILE_NAME)
#         with graph.as_default():
#             set_session(sess)
#             self.model = keras.models.load_model(model_file)
#
#         ohe_file = os.path.join(MODEL_DIR, mobile_model_config.MOBILE_OHE_NAME)
#         with open(ohe_file, 'rb') as f:
#             self.ohe = pickle.load(f)
#
#         scaler_file = os.path.join(MODEL_DIR, mobile_model_config.MOBILE_SCALER_NAME)
#         with open(scaler_file, 'rb') as f:
#             self.scaler = pickle.load(f)
#
#         product_ohe_file = os.path.join(MODEL_DIR, mobile_model_config.MOBILE_PRODUCT_OHE_NAME)
#         with open(product_ohe_file, 'rb') as f:
#             self.product_ohe = pickle.load(f)
#
#         logger.info('loading models done')
#
#     def save_model(self):
#         """
#         保存模型和预处理器
#         :return:
#         """
#         if self.model is None:
#             logger.critical('model is None!')
#             return
#
#         logger.info('saving model...')
#
#         check_conflict_file(mobile_model_config.MODEL_DIR,
#                             mobile_model_config.MODEL_FILE_NAME)
#         model_file = os.path.join(
#             mobile_model_config.MODEL_DIR, mobile_model_config.MODEL_FILE_NAME)
#         self.model.save(model_file)
#
#         check_conflict_file(mobile_model_config.MODEL_DIR,
#                             mobile_model_config.MOBILE_OHE_NAME)
#         with open(os.path.join(mobile_model_config.MODEL_DIR, mobile_model_config.MOBILE_OHE_NAME), 'wb') as f:
#             pickle.dump(self.ohe, f)
#
#         check_conflict_file(mobile_model_config.MODEL_DIR,
#                             mobile_model_config.MOBILE_SCALER_NAME)
#         with open(os.path.join(mobile_model_config.MODEL_DIR, mobile_model_config.MOBILE_SCALER_NAME), 'wb') as f:
#             pickle.dump(self.scaler, f)
#
#         check_conflict_file(mobile_model_config.MODEL_DIR,
#                             mobile_model_config.MOBILE_PRODUCT_OHE_NAME)
#         with open(os.path.join(mobile_model_config.MODEL_DIR, mobile_model_config.MOBILE_PRODUCT_OHE_NAME), 'wb') as f:
#             pickle.dump(self.product_ohe, f)
#
#     def push_model(self):
#         """
#         推送模型文件
#         :return:
#         """
#         logger.info(
#             'pushing models to remote server model_date@{}'.format(self.model_date))
#         exist_server = mobile_model_config.MODEL_PUSH_SERVERS[0]
#         if exist_server == 'no_server':
#             logger.info('No server need to push, SKIP!')
#             return
#
#         for ip in mobile_model_config.MODEL_PUSH_SERVERS:
#             logger.info('push server to server@{}'.format(ip))
#             model_push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models/mobile/'. \
#                 format(mobile_model_config.MODEL_DIR,
#                        mobile_model_config.MODEL_FILE_NAME, ip)
#             os.system(model_push_cmd)
#
#             preprocessor_push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models/mobile/'. \
#                 format(mobile_model_config.MODEL_DIR,
#                        mobile_model_config.MOBILE_OHE_NAME, ip)
#             os.system(preprocessor_push_cmd)
#
#             scaler_push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models/mobile/'. \
#                 format(mobile_model_config.MODEL_DIR,
#                        mobile_model_config.MOBILE_SCALER_NAME, ip)
#             os.system(scaler_push_cmd)
#
#             product_push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models/mobile/'. \
#                 format(mobile_model_config.MODEL_DIR,
#                        mobile_model_config.MOBILE_PRODUCT_OHE_NAME, ip)
#             os.system(product_push_cmd)
#
#             time.sleep(1)
#
#     def push_gc_model(self):
#         """
#         推送模型文件
#         :return:
#         """
#         logger.info(
#             'pushing models to remote server model_date@{}'.format(self.model_date))
#         exist_server = mobile_model_config.MODEL_PUSH_SERVERS[0]
#         if exist_server == 'no_server':
#             logger.info('No server need to push, SKIP!')
#             return
#
#         for ip in mobile_model_config.MODEL_PUSH_SERVERS:
#             logger.info('push server to server@{}'.format(ip))
#             model_push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models_gc/mobile/'. \
#                 format(mobile_model_config.MODEL_DIR,
#                        mobile_model_config.MODEL_FILE_NAME, ip)
#             os.system(model_push_cmd)
#
#             preprocessor_push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models_gc/mobile/'. \
#                 format(mobile_model_config.MODEL_DIR,
#                        mobile_model_config.MOBILE_OHE_NAME, ip)
#             os.system(preprocessor_push_cmd)
#
#             scaler_push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models_gc/mobile/'. \
#                 format(mobile_model_config.MODEL_DIR,
#                        mobile_model_config.MOBILE_SCALER_NAME, ip)
#             os.system(scaler_push_cmd)
#
#             product_push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models_gc/mobile/'. \
#                 format(mobile_model_config.MODEL_DIR,
#                        mobile_model_config.MOBILE_PRODUCT_OHE_NAME, ip)
#             os.system(product_push_cmd)
#
#             time.sleep(1)
#
#     def push_reload_model_message(self):
#         """
#         推送重新加载模型消息
#         :return:
#         """
#         logger.info(
#             'pushing reload model message model_date@{}'.format(self.model_date))
#         exist_server = mobile_model_config.MODEL_PUSH_SERVERS[0]
#         if exist_server == 'no_server':
#             logger.info('No server need to push, SKIP!')
#             return
#
#         params = {"category": 1, "reload": 1,"data":[[]]}
#         base_url = 'http://{}:{}/price/predict'
#         for ip in mobile_model_config.MODEL_PUSH_SERVERS:
#             rm_cmd = 'ssh root@{} rm -f /data/thy/price_model/api_health/health_check'.format(
#                 ip)
#             os.system(rm_cmd)
#             time.sleep(60)
#             for port in mobile_model_config.MODEL_SERVER_PORTS:
#                 request_url = base_url.format(ip, port)
#                 logger.info('push reload message to {}'.format(request_url))
#                 # response = requests.post(request_url, json=params)
#                 # results = json.loads(response.content)
#                 results = {'code': -1}
#                 for i in range(1, 4):
#                     response = requests.post(request_url, json=params)
#                     results = json.loads(response.content)
#                     time.sleep(5)
#                     if results['code'] == 0:
#                         logger.info('push reload message success @time {} '.format(i))
#                         break
#                     else:
#                         logger.info('push reload message fail {} times '.format(i))
#                 if results['code'] != 0:
#                     # 如果有推送reload消息异常，那么停止更新
#                     logger.critical(
#                         'push reloading message error! {}:{}'.format(ip, port))
#                     break
#                 time.sleep(5)
#             time.sleep(30)
#             touch_cmd = 'ssh root@{} touch /data/thy/price_model/api_health/health_check'.format(
#                 ip)
#             os.system(touch_cmd)
#             time.sleep(10)
#
#     def clear_api_cache(self):
#         """
#         清除调用接口缓存
#         :return:
#         """
#         if self.ab_price_api_url is None:
#             logger.info('ab_price_api_url is None, SKIP!')
#         else:
#             try:
#                 response = requests.get(self.ab_price_api_url)
#                 if response.status_code != 200:
#                     logger.critical('手机清除abprice缓存失败!')
#                     dingding_messager.send_message('手机清除abprice缓存失败!')
#             except Exception as e:
#                 logger.critical('手机调用清除abprice缓存接口异常：{}'.format(e))
#                 dingding_messager.send_message(
#                     '手机调用清除abprice缓存接口异常：{}'.format(e))
#
#     def clear_models(self):
#         """
#         清理历史模型文件
#         :return:
#         """
#         logger.info('clearing model files {}'.format(self.model_date))
#         clear_timestamp = (get_today(
#         ) - datetime.timedelta(days=mobile_model_config.KEEP_MODEL_DAYS)).timestamp()
#         model_files = os.listdir(mobile_model_config.MODEL_DIR)
#         for file in model_files:
#             if file.endswith('pkl') or file.endswith('h5'):
#                 file_name = os.path.join(mobile_model_config.MODEL_DIR, file)
#                 create_time = os.path.getctime(file_name)
#                 if create_time < clear_timestamp:
#                     logger.info('removing file@{}'.format(file))
#                     os.remove(file_name)
#
#     def cal_history_avg_price(self,to_redis=False):
#         """
#         计算sku历史平均价格
#         :return:
#         """
#         logger.info('calculate history avg price...')
#         history_price_date = self.model_date - datetime.timedelta(
#             days=mobile_model_config.MOBILE_HISTORY_AVG_PRICE_DAYS)
#         history_avg_price_sql = """
#         SELECT product_sku_key, product_level_key, avg(predict_origin) AS avg_price
#         FROM dm.dm_price_predict_export
#         WHERE product_category_id = 1 AND predict_date >= '{}'
#         GROUP BY product_sku_key, product_level_key
#         """.format(history_price_date.strftime('%Y-%m-%d'))
#         history_avg_price = postgre_processor.load_sql(history_avg_price_sql)
#         self.history_avg_price = history_avg_price
#
#         if to_redis:
#             logger.info('saving mobile history price into redis')
#             history_avg_price.index = mobile_model_config.MOBILE_HISTORY_AVG_PRICE_PREFIX + \
#                 history_avg_price['product_sku_key'].astype(
#                     str) + '_' + history_avg_price['product_level_key'].astype(str)
#             redis_dict = history_avg_price['avg_price'].to_dict()
#             # for key, value in redis_dict.items():
#             #     redis_conn.set(
#             #         key, value, ex=mobile_model_config.MOBILE_HISTORY_CACHE_TIME)
#             self.insertInRedisByPipline(redis_dict, 10000)
#             logger.info('saving mobile history price into redis done!')
#
#     def insertInRedisByPipline(self,data,batchSize=10000):
#         i=0
#         pipe=redis_conn.pipeline()
#         for k,v in data.items():
#             i+=1
#             pipe.set(k, v, ex=mobile_model_config.MOBILE_HISTORY_CACHE_TIME)
#             if(i%batchSize==0):
#                 try:
#                     pipe.execute()
#                 except Exception as e:
#                     logger.error('pipline save into redis size i={} has error'.format(i))
#                 # if(i>=200000):
#                 #     break
#                 logger.info('pipline save into redis size i={}'.format(i))
#         pipe.execute()
#     def process_complement_data(self):
#         """
#         处理补全数据
#         :return:
#         """
#         logger.info('processing complement data...')
#
#         data = self.__load_complement_data()
#
#         # 写入pickle
#         #complement_pkl = open('complement.pkl', 'wb')
#         #pickle.dump(data, complement_pkl)
#         #complement_pkl.close()
#         # 读取pickle
#         # complement_pkl = open('complement.pkl', 'rb')
#         # data = pickle.load(complement_pkl)
#
#         data['property'] = data['property'].map(lambda x: x.strip())
#         data['property_value'] = data['property_value'].map(lambda x: x.strip())
#         data.drop_duplicates(subset=['product_sku_id', 'property'], keep='last', inplace=True)
#         data = data.pivot_table(index='product_sku_id', columns='property', values='property_value',
#                                 aggfunc=lambda x: x).reset_index()
#         data.fillna('unknown', inplace=True)
#
#         product_id_data = self.__load_product_data()
#         data = pd.merge(data, product_id_data, on='product_sku_id')
#         data['product_category_id'] = 1
#         data['product_category_name'] = '手机'
#
#         memory_storage_split = data['memory'].str.split('+', expand=True)
#         if memory_storage_split.shape[1] == 1:
#             # 如果只分出来一个字段，那么补上一个
#             memory_storage_split.columns = ['memory_tmp']
#             memory_storage_split['storage_tmp'] = np.nan
#         else:
#             memory_storage_split.columns = ['memory_tmp', 'storage_tmp']
#         data = pd.concat([data, memory_storage_split], axis=1, sort=False)
#         data['memory'] = data['memory_tmp']
#         data['storage'] = data['storage'].where(pd.isnull(data['storage_tmp']), data['storage_tmp'])
#
#         data.loc[data['product_brand_name'] == '苹果', 'memory'] = 'apple_memory'
#         data['color'] = np.where(data['color'] != 'unknown', data['product_name'] + '_X_' + data['color'], 'unknown')
#         data['period'] = '0'
#
#         levels = self.__load_product_levels()
#         brands = self.__load_product_brand()
#         # 做笛卡尔积
#         data['_tmp_key'] = 0
#         levels['_tmp_key'] = 0
#
#         data = pd.merge(data, levels, on='_tmp_key', how='outer').drop(columns='_tmp_key')
#         self.predict_data = pd.merge(data, brands, on='product_brand_name', how='left')
#         self.predict_data.product_brand_id.fillna(-1, inplace=True)
#         template_level_mapping = self.__load_template_level_mapping()
#         self.predict_data = pd.merge(self.predict_data, template_level_mapping,
#                                on=['product_level_template_id', 'product_level_id'])
#         self.predict_data['date'] = self.model_date.strftime('%Y-%m-%d')
#
#     def __load_complement_data(self):
#         """
#         读取测试数据
#         :return:
#         """
#         return postgre_processor.load_sql(COMPLETE_DATA_SQL)
#
#     def __load_product_data(self):
#         """
#         读取产品的ID
#         :return:
#         """
#         product_data = postgre_processor.load_sql(PRODUCT_SQL)
#         product_data['product_level_template_id'] = product_data['product_level_template_id'].astype(str)
#         return product_data
#
#     def __load_product_levels(self):
#         """
#         读取等级
#         :return:
#         """
#         return postgre_processor.load_sql(LEVEL_SQL)
#
#     def __load_product_brand(self):
#         """
#         读取品牌
#         :return:
#         """
#         return postgre_processor.load_sql(BRAND_ID_NAME_SQL)
#
#     def __load_template_level_mapping(self):
#         """
#         读取等级模板和等级对应关系
#         :return:
#         """
#         mapping = postgre_processor.load_sql(LEVEL_TEMPLATE_MAPPING_SQL)
#         mapping['product_level_template_id'] = mapping['product_level_template_id'].astype(str)
#         mapping['product_level_id'] = mapping['product_level_id'].astype(int)
#
#         return mapping
#
#     def predict_complement_data(self):
#         """
#         预测补全数据
#         :return:
#         """
#         if self.predict_data is None:
#            logger.critical('predict_data is None!')
#            return
#         if self.model is None:
#             self.load_models()
#
#         logger.info('predicting data...')
#
#         self.predict_data['period'] = '0'
#         self.predict_data['small_version'] = self.predict_data['small_version'].astype(str)
#         test_x_inputs = self.ohe.transform(self.predict_data[FEATURES])
#         t1 = time.time()
#         predict_scaled = self.model.predict(test_x_inputs)
#         logger.info('predicting data... done！')
#         #写入pickle
#         #predict_pkl = open('predict_scaled.pkl', 'wb')
#         #pickle.dump(predict_scaled, predict_pkl)
#         #predict_pkl.close()
#         # 读取pickle
#         # predict_pkl = open('predict_scaled.pkl', 'rb')
#         # predict_scaled = pickle.load(predict_pkl)
#
#         self.predict_data['forecast_reference_price'] = np.round(
#              self.scaler.inverse_transform(predict_scaled).flatten()).astype(int)
#         self.predict_data['predict_origin'] = self.predict_data['forecast_reference_price']
#             # 处理和均价的加权
#         self.predict_data['key'] = mobile_model_config.MOBILE_HISTORY_AVG_PRICE_PREFIX + \
#                               self.predict_data['product_sku_id'].astype(str) + '_' + self.predict_data['product_level_id'].astype(str)
#
#         self.history_avg_price = self.history_avg_price.rename(columns={'product_sku_key': 'product_sku_id','product_level_key':'product_level_id',
#                                                                         'avg_price':'history_avg_price'})
#         self.predict_data = pd.merge(self.predict_data,self.history_avg_price,on=['product_sku_id','product_level_id'],how='left')
#         self.predict_data['history_avg_price'] = self.predict_data['history_avg_price'].where(
#             self.predict_data['history_avg_price'].notnull(), self.predict_data['forecast_reference_price'])
#         self.predict_data['forecast_reference_price'] = self.predict_data['forecast_reference_price'] * MODEL_PRICE_WEIGHT + \
#                                 self.predict_data['history_avg_price'] * HISTORY_AVG_PRICE_WEIGHT
#
#         self.predict_data['is_new_product'] = self.product_ohe.transform(
#             self.predict_data[['product_name']]).sum(axis=1).astype(int)
#         self.predict_data['is_new_product'] = self.predict_data['is_new_product'].replace({0: 1, 1: 0})
#
#         # 处理环保机报价
#         # env_protect = (self.predict_data['product_level_template_id'] == mobile_model_config.EVN_TEMPLATE_ID) \
#         #               & (self.predict_data['is_new_product'] == 1)
#         env_protect = self.predict_data['product_level_template_id'] == mobile_model_config.EVN_TEMPLATE_ID
#         self.predict_data.loc[env_protect, 'forecast_reference_price'] = self.predict_data.loc[
#             env_protect, 'forecast_reference_price'].where(
#             self.predict_data.loc[env_protect, 'forecast_reference_price'] <= MAX_EVN_PRICE, MAX_EVN_PRICE)
#
#         # 处理最小值
#         self.predict_data['forecast_reference_price'] = self.predict_data['forecast_reference_price'].where(
#             self.predict_data['forecast_reference_price'] >= 5, 5)
#
#         logger.info('predicting data done')
#
#     def save_complement_data(self):
#         """
#         保存补全数据
#         :return:
#         """
#         if self.predict_data is not None:
#             logger.info('saving predict data...')
#             # template_level_mapping = self.__load_template_level_mapping()
#             # insert_data = pd.merge(self.predict_data, template_level_mapping,
#             #                        on=['product_level_template_id', 'product_level_id'])
#             dt = datetime.datetime.strptime(time.strftime('%Y-%m-%d'), '%Y-%m-%d') + datetime.timedelta(days=1)
#             self.predict_data['date'] = dt.strftime('%Y-%m-%d')
#             # insert_sql = """
#             #             REPLACE INTO fact_opt_forecast_reference_price(date, product_sku_key, product_sku_name, product_level_key,
#             #             product_level_name, product_key, product_name, product_category_id, product_category_name,
#             #             product_brand_name, forecast_reference_price)
#             #             VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
#             #             """
#             # mysql_processor.execute_insert_sql(insert_sql,
#             #                                    self.predict_data[
#             #                                        ['date', 'product_sku_id', 'product_sku_name', 'product_level_id',
#             #                                         'product_level_name', 'product_id', 'product_name',
#             #                                         'product_category_id', 'product_category_name',
#             #                                         'product_brand_name', 'forecast_reference_price']
#             #                                    ].to_records(index=False).tolist())
#             # logger.info('saving predict data to fact_opt_forecast_reference_price done')
#             insert_sql = """
#             INSERT INTO price_prediction_c_sku_mobile_test(date, product_sku_key, product_sku_name, product_level_key,
#             product_level_name, product_key, product_name, product_category_id, product_category_name, product_brand_id,
#             product_brand_name, predict_origin, forecast_reference_price,is_new_product,POLY_pred_price)
#             VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,  %s,%s, %s, %s)
#             """
#             mysql_price_model.execute_insert_sql(insert_sql,
#                                                self.predict_data[
#                                                    ['date', 'product_sku_id', 'product_sku_name', 'product_level_id',
#                                                     'product_level_name', 'product_id', 'product_name',
#                                                     'product_category_id', 'product_category_name','product_brand_id',
#                                                     'product_brand_name', 'predict_origin', 'frp_new','is_new_product',
#                                                     'ppp_new']
#                                                ].to_records(index=False).tolist())
#             logger.info('saving predict data to mysql done')
#             self.mysql_price_num = len(self.predict_data)
#
#
#
#     def _cal_Performance(self, y_true, y_pred, name_space=['mae','mse','explained_variance_score','r2_score']):
#         '''
#         Input
#         y_true：真实的数据值
#         y_pred：回归模型预测的数据值
#         explained_variance_score：解释回归模型的方差得分，其值取值范围是[0,1]，越接近于1说明自变量越能解释因变量
#         的方差变化，值越小则说明效果越差。
#         mean_absolute_error：平均绝对误差（Mean Absolute Error，MAE），用于评估预测结果和真实数据集的接近程度的程度
#         ，其值越小说明拟合效果越好。
#         mean_squared_error：均方差（Mean squared error，MSE），该指标计算的是拟合数据和原始数据对应样本点的误差的
#         平方和的均值，其值越小说明拟合效果越好。
#         r2_score：判定系数，其含义是也是解释回归模型的方差得分，其值取值范围是[0,1]，越接近于1说明自变量越能解释因
#         变量的方差变化，值越小则说明效果越差。
#
#         Return
#         Python Dictionary
#         '''
#         from sklearn.metrics import mean_absolute_error, mean_squared_error, explained_variance_score, r2_score
#         model_metrics_name = {'mae':mean_absolute_error, 'mse': mean_squared_error,
#                       'explained_variance_score': explained_variance_score, 'r2_score': r2_score}
#         mertics_result = {}
#
#         for name,func in model_metrics_name.items():
#             if name in name_space:
#                 tmp_score = func(y_true,y_pred)
#                 mertics_result[name] = tmp_score
#
#         return mertics_result
#
#     def smooth_strategy(self, use_template=False, sw_balance=True, balance_rule='stat', N_Degree=10):
#         '''
#         预测分调整
#         Input
#         use_template: 是否使用templateid作为模型拟合主体
#         '''
#         df_data_all = self.predict_data.copy(deep=True)
#
#         set_flag = [True if i in ABT_RY_sku_list else False for i in df_data_all.product_sku_id]
#         df_data = df_data_all[set_flag].copy(deep=True)
#
#
#         # 数据转换
#         df_data['uniform_frp'] = df_data.groupby(['product_sku_id'])['forecast_reference_price'].apply(
#                                          lambda x: x / max(x))
#
#         df_data['log_uniform_frp'] = [np.log(i) for i in df_data.uniform_frp]
#
#         # 特征准备
#         x_name_list = []
#         for degree in range(N_Degree):
#             col_name = 'level_regression_x_' + str(degree)
#             df_data[col_name] = (df_data.groupby('product_sku_id')['product_level_id'].agg('rank') - 1)**degree
#             x_name_list.append(col_name)
#
#
#         if sw_balance:
#             sample_w = self._get_sample_weight(criterion=balance_rule)
#         else:
#             sample_w = {}
#
#
#         if use_template:
#             # 按templateid进行拟合 (接口还没有测试)
#             temp_ypred = df_data.groupby('product_level_template_id').apply(
#                                                     lambda x: self._OLS_fit_func(x, x_name_list, sample_w, sample_balance=sw_balance))
#         else:
#             # 按sku进行拟合
#             temp_ypred = df_data.groupby('product_sku_id').apply(
#                                                     lambda x: self._OLS_fit_func(x, x_name_list, sample_w, sample_balance=sw_balance))
#
#         if self._OLS_EXCEPTION_FLAG_:
#             dingding_messager.send_message('OLS Function Fit Error, Check SKU List: %s' % (str(self.SKU_ID_CHECK_LIST)))
#
#         temp_ypred = temp_ypred.reset_index()
#         temp_ypred.columns = ['product_sku_id', 'id', 'y_pred']
#         temp_ypred = temp_ypred.set_index('id')
#         df_data = df_data.merge(temp_ypred[['y_pred']], left_index=True, right_index=True)
#
#         df_data_all = df_data_all.merge(df_data[['product_sku_id','product_level_id','y_pred']],
#                                         left_on=['product_sku_id','product_level_id'],
#                                         right_on=['product_sku_id', 'product_level_id'],
#                                         how='left')
#         df_data_all['y_pred'] = np.round(df_data_all.y_pred.fillna(-1)).astype(int)
#
#         df_data_all['frp_new'] = np.round(df_data_all[['forecast_reference_price','y_pred']].apply(lambda x:
#                                  x.forecast_reference_price if x.y_pred == -1 else x.y_pred, axis=1)).astype(int)
#         df_data_all['ppp_new'] = np.round(df_data_all[['forecast_reference_price','y_pred']].apply(lambda x:
#                                  x.forecast_reference_price if x.y_pred != -1 else -1, axis=1)).astype(int)
#
#
#         self.predict_data = df_data_all
#
#         result_dict = self._cal_Performance(df_data_all[df_data_all.y_pred != -1].forecast_reference_price,
#                                         df_data_all[df_data_all.y_pred != -1].y_pred,name_space=['mae','mse'])
#         result_dict['original_reverse_rate'], result_dict['level_rr_original'], result_dict['sku_rr_original'] = self._get_reverse_rate(
#                                             df_data_all[df_data_all.y_pred != -1][
#                                             ['product_sku_id','forecast_reference_price','product_level_id']
#                                             ], parm={'oar':1, 'tr':0, 'tbp':0})
#         result_dict['ypred_reverse_rate'], result_dict['level_rr_ypred'], result_dict['sku_rr_ypred'] = self._get_reverse_rate(
#                                             df_data_all[df_data_all.y_pred != -1][
#                                             ['product_sku_id', 'y_pred', 'forecast_reference_price',
#                                             'product_level_id']
#                                             ], parm={'oar':1, 'tr':0, 'tbp':0}, price_name='y_pred')
#         result_dict['decent_model_check'], _, _ = self._get_reverse_rate(df_data_all[df_data_all.y_pred != -1][
#                                             ['product_sku_id', 'y_pred', 'forecast_reference_price',
#                                             'product_level_id']
#                                             ], parm={'oar':0.3, 'tr':0.35, 'tbp':0.35}, price_name='y_pred',
#                                             level_specific=False, sku_specific=False)
#         result_dict['datetime'] = str(get_today())
#
#         df_model_metrics = pd.DataFrame.from_dict(result_dict, orient='index').T.astype(str)
#
#
#         if os.environ.get('environment') == 'deploy':
#             metrics_insert_sql = '''
#             INSERT INTO dm.dulin_model_monitor(date, frp_reverse_rate, ppp_reverse_rate, decent_score, mae, mse, level_rr_frp, level_rr_ppp, sku_rr_frp, sku_rr_ppp)
#             VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
#             '''
#         else:
#             metrics_insert_sql = '''
#             INSERT INTO tmp.dulin_model_monitor(date, frp_reverse_rate, ppp_reverse_rate, decent_score, mae, mse, level_rr_frp, level_rr_ppp, sku_rr_frp, sku_rr_ppp)
#             VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
#             '''
#
#         postgre_processor.execute_insert_sql(metrics_insert_sql,
#                                      df_model_metrics[
#                                      ['datetime', 'original_reverse_rate', 'ypred_reverse_rate',
#                                       'decent_model_check', 'mae', 'mse', 'level_rr_original',
#                                       'level_rr_ypred', 'sku_rr_original', 'sku_rr_ypred']
#                                      ].to_records(index=False).tolist())
#         # print(df_model_metrics)
#         dingding_messager.send_message(
#                 '(通知消息) 模型预测分处理完成! SKU_LEVEL数： %s, SKU数： %s, 处理前倒挂率： %s, 处理后倒挂率： %s' %
#                 (str(len(df_data_all[df_data_all.y_pred != -1])),
#                  str(df_data_all[df_data_all.y_pred != -1].product_sku_id.nunique()),
#                  str(result_dict['original_reverse_rate']), str(result_dict['ypred_reverse_rate'])))
#
#
#
#     # 获取sample_weight
#
#     def _get_sample_weight(self, criterion):
#         '''
#         Input
#         criterion: 'stat' 统计变量得出各level权重
#                    'rule' 经验权重
#         Return
#         Dictionary
#         '''
#         sample_weight_dict = {}
#         if criterion == 'rule':
#             sample_weight_dict = {'S': 100,'A+': 98,'A1': 96,'A': 94,'A2': 92,'B+1': 90,'B+': 88,'B+2': 86,
#                             'B1': 84,'B': 82,'B2': 80,'C+1': 78,'C+': 76,'C+2': 74,'C1': 72,'C': 70,'C2': 68,'D+1': 66,
#                             'D+': 64,'D+2': 62,'D1': 60,'D': 58,'D2': 56,'D3': 54,'E+1': 52,'E+': 50,'E+2': 48,'E1': 46,
#                             'E': 44,'E2': 42,'F+': 40,'F': 38,'G+': 36,'G1': 34,'G': 32,'G2': 30,'H+': 28,'H1': 26,
#                             'H': 24,'H2': 22,'I+': 20,'I1': 18,'I': 16,'I2': 14,'J+': 12,'J': 10,'K+': 8,'K': 6}
#
#         elif criterion == 'stat':
#             # 拓展transform，通过大类class进行定义，然后transform回小类别权重
#             sample_weight_dict = self.model_data.groupby('product_level_name')['product_no'].agg('count').to_dict()
#
#         else:
#             print('critical error: criterion rule is not defined')
#         return sample_weight_dict
#
#
#
#     # 模型训练预测
#     def _OLS_fit_func(self, df_look, x_columns, sample_weight_dict, intercept=False, sample_balance=True,
#                       balance_column='product_level_name', y_column='log_uniform_frp'):
#         '''
#         Input
#         df_look: Dataframe
#         intercept: bool
#         sample_balance: bool
#         balance_criterion: 'stat' 统计变量得出各level权重
#                            'rule' 经验权重
#         balance_column: string
#         x_columns: list
#         y_column: string
#
#         Return
#         Series exp_ypred
#         '''
#         try:
#             OLS_model = LinearRegression(fit_intercept=intercept)
#             # 可以考虑通过self.model_data进行weight dict的设置
#             if sample_balance:
#                 sw = [sample_weight_dict[i] for i in df_look[balance_column]]
#             else:
#                 sw = [1 for i in df_look[balance_column]]
#
#             # model fit
#             OLS_model.fit(df_look[x_columns], df_look[y_column], sample_weight=sw)
#             # model param
#             df_look['exp_ypred'] = np.exp(OLS_model.predict(df_look[x_columns])) * df_look.forecast_reference_price.max()
#         except Exception as e:
#             df_look['exp_ypred'] = df_look.forecast_reference_price
#             self.SKU_ID_CHECK_LIST.append(df_look.product_sku_id.unique()[0])
#             self._OLS_EXCEPTION_FLAG_ = True
#
#         # result show
#         return df_look['exp_ypred']
#
#     def _get_reverse_rate(self, df_pred_level, price_name='forecast_reference_price',
#                           base_name='forecast_reference_price', parm={'oar':0.3, 'tr':0.35, 'tbp':0.35},
#                           level_specific=True, sku_specific=True):
#         '''
#         Input:
#         df_pred_level: DataFrame
#
#         Return:
#         reverse_rate: float
#         '''
#         def _rolling_min_(df):
#             temp_min = [df[price_name].iloc[0]] + [df[price_name].head(i).min()
#                                                                    for i in range(1,len(df))]
#             return pd.DataFrame(temp_min,index=df.index,columns=['rolling_min'])
#         df_pred_level = df_pred_level.sort_values('product_level_id', ascending=True)
#         temp = df_pred_level.groupby('product_sku_id')[[price_name,'product_sku_id']].apply(
#                                                                                     lambda x: _rolling_min_(x))
#         df_pred_level['rolling_min'] = temp['rolling_min']
#     #         df_pred_level.to_pickle('/data/dulin/check_datasource/Output_datasource/df_pred_reverse_cal.pkl')
#         over_all_rr = len(df_pred_level[df_pred_level[price_name] > (50 + df_pred_level.rolling_min)]) / len(df_pred_level)
#
#         temp_ = df_pred_level.groupby('product_sku_id').head(5)
#
#         top_rr = len(temp_[temp_[price_name] > (50 + temp_.rolling_min)]) / len(temp_)
#
#         temp_['bias_percent'] = (temp_[price_name] - temp_[base_name]) / temp_[base_name]
#
#         top_bp = temp_.bias_percent.abs().mean()
#
#         if level_specific:
#             level_specific_rr = df_pred_level.groupby('product_level_id').apply(lambda x:
#                                               (x[price_name] > (50 + x.rolling_min)).sum() / len(x)).to_dict()
#         else:
#             level_specific_rr = {}
#
#         if sku_specific:
#             sku_specific_rr = df_pred_level.groupby('product_sku_id').apply(lambda x:
#                                               (x[price_name] > (50 + x.rolling_min)).sum()).to_dict()
#         else:
#             sku_specific_rr = {}
#
#         return parm['oar']*over_all_rr + parm['tr']*top_rr + parm['tbp']*top_bp, str(level_specific_rr), str(sku_specific_rr)
#
#
#
#     def launch_model(self):
#         """
#         启动模型过程
#         :return:
#         """
#         self.load_model_data()
#         # self.eliminate_anomaly()
#         # self.detect_anomaly()
#         # # 在当日异常数据处理完成后，再次调用排除异常的方法来排除当日异常数据
#         # self.eliminate_anomaly()
#         self.extract_training_data()
#         self.hyper_train_model()
#         # self.evaluate_model()
#         self.save_model()
