import datetime
import json
import os
import pickle
import time

import numpy as np
import pandas as pd
import math
import requests
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LinearRegression
from src.utils.model_utils import getMape,custom_mean_absolute_percentage

from src.mobile.anomaly.anomaly_detect import MobileAnomalyDetector
from src.mobile.anomaly.anomaly_load import load_anomaly_document_item
from src.mobile.model import mobile_model_config
from src.mobile.model.mobile_model_config import ABT_RY_sku_list
from src.mobile.model.smooth_preprocessor_new import intelligent_data_smooth
from src.mobile.preprocessor.settle_data_loader import preprocess_settle_data
from src.mobile.preprocessor.model_data_preprocessor import preprocess_model_data,repeatData
from src.utils.config import config, logger
from src.utils.db_processor import mysql_prediction_processor,presto_processor
from src.utils.feishu_message import feishu_messager
from src.utils.redis_pool import redis_conn
from src.utils.util import check_conflict_file, get_today, format_date_string,format_date
from src.utils.model_utils import getMape
from tensorflow.python.keras import regularizers
from src.mobile.job.sku_price_complement import PRODUCT_SQL,COMPLETE_DATA_SQL,LEVEL_SQL,\
    LEVEL_TEMPLATE_MAPPING_SQL,FEATURES,BRAND_ID_NAME_SQL,BASEPRICE_MOBILE_SQL,EXCLUDE_SKU_SQL
from src.mobile.predict.mobile_price_config import MODEL_DIR, MAX_EVN_PRICE, \
    HISTORY_AVG_PRICE_WEIGHT, MODEL_PRICE_WEIGHT

import tensorflow as tf
from tensorflow import keras
from tensorflow.python.keras.backend import set_session
# TF1-style global session/graph setup: model loading and inference must
# re-enter this same graph (see load_models, which wraps load_model in
# graph.as_default() + set_session(sess)).
session_config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=session_config)
# NOTE(review): `global` at module level is a no-op; kept for byte-compatibility.
global graph
graph = tf.get_default_graph()

def load_history_avg_price(key):
    """
    Fetch the cached historical average price for *key* from Redis.

    :param key: redis key
    :return: the price as an int, or None when the key is absent
    """
    cached = redis_conn.get(key)
    if cached is None:
        return None
    return int(float(cached))

class MobileModel:
    def __init__(self, model_date=None):
        """
        :param model_date: modelling date; defaults to today when omitted
        """
        self.model_date = get_today() if model_date is None else format_date(model_date)

        # holders populated later by load_model_data / train_model
        self.model_data = None
        self.model = None
        self.train_history = None
        # one-hot encoder for the categorical model features
        self.ohe = OneHotEncoder(handle_unknown='ignore')
        # separate product one-hot encoder, used at predict time to spot new models
        self.product_ohe = None
        self.mysql_price_num = 0
        # price standardiser
        self.scaler = StandardScaler()
        # URL used to flush the ab-price API cache
        self.ab_price_api_url = config.get_config(
            'ab_price_api', 'cache_clear_url')
        self._OLS_EXCEPTION_FLAG_ = False
        self.SKU_ID_CHECK_LIST = []
        self.history_avg_price = pd.DataFrame(
            {'product_sku_key': [], 'product_level_key': [], 'avg_price': []})

    def load_model_data(self):
        """
        Load the modelling dataset and the product one-hot encoder.
        :return:
        """
        data, product_encoder = preprocess_settle_data()
        self.model_data = data
        self.product_ohe = product_encoder
        logger.info('model_data shape@{}'.format(data.shape))

    def eliminate_anomaly(self):
        """
        Drop settle records whose document items were flagged as anomalous.
        :return:
        """
        logger.info('eliminating anomaly...')
        anomalies = load_anomaly_document_item()
        if anomalies.empty:
            return
        logger.info('eliminating anomaly products size@{}'.format(
            anomalies.shape))
        keep = ~self.model_data['document_item_id'].isin(
            anomalies['document_item_id'])
        self.model_data = self.model_data[keep]

    def detect_anomaly(self):
        """
        Detect anomalous records by scoring items with yesterday's model
        and comparing the prediction against the latest settle price.
        :return:
        """
        yesterday = self.model_date - datetime.timedelta(days=1)
        detector = MobileAnomalyDetector(self.model_data, yesterday)
        detector.launch_anomaly_detection()

    def weight_function(self, s, relative_date=None, max_period=42):
        """
        计算周期，30天为一周期
        :param s: 日期序列
        :param relative_date: 当前日期，默认为当天日期
        :param max_period: 最大周期数
        :param interval: 周期间隔
        :return:
        """
        if relative_date is None:
            relative_date = get_today()
        diff_days = relative_date - s
        diff_days = diff_days.days
        period = max_period - diff_days
        # return math.log(min(max(period, 2),42), 15)
        return min(max(period, 1),42)/ 3.0

    # Shuffle the training data rows
    def shuffle(self):
        X = self.model_data
        ind = np.arange(X.shape[0])
        for i in range(7):
            np.random.shuffle(ind)
        return X.loc[ind]

    def build_model(self, input_shape):
        """
        Build a small fully-connected regression network (128-64-32-1).
        :param input_shape: number of input features
        :return:
        """
        net_input = keras.Input(shape=(input_shape,))
        x = keras.layers.Dense(128, activation='relu')(net_input)
        x = keras.layers.Dense(64, activation='relu')(x)
        x = keras.layers.Dense(32, activation='relu')(x)
        net_output = keras.layers.Dense(1, activation='linear')(x)

        self.model = keras.Model(inputs=net_input, outputs=net_output)
        self.model.compile(optimizer=keras.optimizers.Adam(), loss='mse')

    def wd_build_model(self, input_shape):
        """
        Build a wide-and-deep style network (raw input concatenated with
        the deep tower's output) trained on the custom MAPE loss.
        :param input_shape: number of input features
        :return:
        """
        net_input = keras.layers.Input(shape=input_shape)
        deep = keras.layers.Dense(
            1280, activation='relu',
            kernel_regularizer=regularizers.l2(1.8e-05),
            kernel_initializer='normal')(net_input)
        deep = keras.layers.Dense(
            2048, activation=tf.nn.leaky_relu,
            kernel_regularizer=regularizers.l2(0.005790000000000001),
            kernel_initializer='normal')(deep)
        deep = keras.layers.Dense(
            128, activation='relu',
            kernel_regularizer=regularizers.l2(0.0043500000000000014),
            kernel_initializer='normal')(deep)
        wide_and_deep = keras.layers.concatenate([net_input, deep])
        net_output = keras.layers.Dense(
            1,
            kernel_regularizer=regularizers.l1_l2(l1=0.02099999999999999,
                                                  l2=0.05999999999999996),
            activation='linear')(wide_and_deep)
        self.model = keras.Model(inputs=net_input, outputs=net_output)

        self.model.compile(loss=custom_mean_absolute_percentage,
                           optimizer=keras.optimizers.Adam(),
                           metrics=[custom_mean_absolute_percentage, 'mse'])

        logger.info('模型初始化完成！')

    def wd_build_model_mse(self, input_shape):
        """
        Build a wide-and-deep style network (raw input concatenated with
        the deep tower's output) trained on MSE.
        :param input_shape: number of input features
        :return:
        """
        net_input = keras.layers.Input(shape=input_shape)
        deep = keras.layers.Dense(
            1280, activation='relu',
            kernel_regularizer=regularizers.l2(0.02861),
            kernel_initializer='normal')(net_input)
        deep = keras.layers.Dense(
            256, activation='relu',
            kernel_regularizer=regularizers.l2(0.0191),
            kernel_initializer='normal')(deep)
        deep = keras.layers.Dense(
            32, activation='relu',
            kernel_regularizer=regularizers.l2(0.0106),
            kernel_initializer='normal')(deep)
        wide_and_deep = keras.layers.concatenate([net_input, deep])
        net_output = keras.layers.Dense(
            1,
            kernel_regularizer=regularizers.l1_l2(l1=0.01,
                                                  l2=0.21000000000000002),
            activation='linear')(wide_and_deep)
        self.model = keras.Model(inputs=net_input, outputs=net_output)

        self.model.compile(loss='mse',
                           optimizer=keras.optimizers.Adam(),
                           metrics=['mse', custom_mean_absolute_percentage])

        logger.info('模型初始化完成！')

    def build_model_mse(self, input_shape):
        """
        Build a deep-only regression network (1280-1280-32-1) trained on
        MSE. This is the variant currently used by train_model.
        :param input_shape: number of input features
        :return:
        """
        net_input = keras.layers.Input(shape=input_shape)
        x = keras.layers.Dense(
            1280, activation='relu',
            kernel_regularizer=regularizers.l2(0.00509),
            kernel_initializer='normal')(net_input)
        x = keras.layers.Dense(
            1280, activation=tf.nn.leaky_relu,
            kernel_regularizer=regularizers.l2(0.016),
            kernel_initializer='normal')(x)
        x = keras.layers.Dense(
            32, activation='relu',
            kernel_regularizer=regularizers.l2(0.0095),
            kernel_initializer='normal')(x)
        net_output = keras.layers.Dense(
            1,
            kernel_regularizer=regularizers.l1_l2(l1=0.187,
                                                  l2=0.11900000000000001),
            activation='linear')(x)
        self.model = keras.Model(inputs=net_input, outputs=net_output)

        self.model.compile(loss='mse',
                           optimizer=keras.optimizers.Adam(),
                           metrics=['mse', custom_mean_absolute_percentage])

        logger.info('模型初始化完成！')


    def at_build_model_mse(self, input_shape):
        """
        Build a regression network with a softmax feature-attention gate
        in front of the dense tower, trained on MSE.
        :param input_shape: number of input features
        :return:
        """
        net_input = keras.layers.Input(shape=input_shape)
        # per-feature attention weights, multiplied element-wise onto the input
        attention_probs = keras.layers.Dense(
            input_shape, activation='softmax', name='attention_vec')(net_input)
        gated = keras.layers.Multiply()([net_input, attention_probs])

        x = keras.layers.Dense(
            512, activation=tf.nn.leaky_relu,
            kernel_regularizer=regularizers.l2(0.017700000000000004),
            kernel_initializer='normal')(gated)
        x = keras.layers.Dense(
            256, activation='relu',
            kernel_regularizer=regularizers.l2(0.04810000000000001),
            kernel_initializer='normal')(x)
        x = keras.layers.Dense(
            64, activation='relu',
            kernel_regularizer=regularizers.l2(0.0731),
            kernel_initializer='normal')(x)
        net_output = keras.layers.Dense(
            1,
            kernel_regularizer=regularizers.l1_l2(l1=0.21399999999999983,
                                                  l2=0.3629999999999997),
            activation='linear')(x)
        self.model = keras.Model(inputs=net_input, outputs=net_output)

        self.model.compile(loss='mse',
                           optimizer=keras.optimizers.Adam(),
                           metrics=['mse', custom_mean_absolute_percentage])

        logger.info('模型初始化完成！')

    def train_model_old(self):
        """
        Legacy training routine (superseded by train_model; kept for reference).

        NOTE(review): train_test_split is called first, but x_train/y_train
        are then overwritten with the FULL dataset, so the "validation"
        split passed to fit() is a subset of the training data — val_loss
        is not an out-of-sample estimate. recent_x_inputs/recent_y_scaled
        are computed but never used (the weighted fine-tuning pass is
        commented out). TODO confirm whether this leakage was intentional.
        :return:
        """
        if self.model_data is None:
            logger.critical('model_data is None!')
            return

        # recent data (last RECENT_DAYS days)
        recent_date = self.model_date - \
                      datetime.timedelta(days=mobile_model_config.RECENT_DAYS)
        recent_data = self.model_data[self.model_data['settle_list_create_date'] >= recent_date]
        # repeatDf=repeatData(recent_data,size=3)
        # self.model_data=self.model_data.append(repeatDf,ignore_index=True)

        x_train, x_valid, y_train, y_valid = train_test_split(self.model_data.drop(columns='item_quotation_price_num'),
                                                              self.model_data['item_quotation_price_num'],
                                                              test_size=0.2)
        # overwrite the split with the full dataset (see NOTE above)
        x_train = self.model_data.drop(columns='item_quotation_price_num')
        y_train = self.model_data['item_quotation_price_num']

        train_x_inputs = self.ohe.fit_transform(
            x_train[mobile_model_config.MOBILE_FEATURES])
        valid_x_inputs = self.ohe.transform(
            x_valid[mobile_model_config.MOBILE_FEATURES])
        recent_x_inputs = self.ohe.transform(
            recent_data[mobile_model_config.MOBILE_FEATURES])

        # target is standardised here (unlike train_model, which fits on raw prices)
        train_y_scaled = self.scaler.fit_transform(
            y_train.values.reshape((-1, 1))).flatten()
        valid_y_scaled = self.scaler.transform(
            y_valid.values.reshape((-1, 1))).flatten()
        recent_y_scaled = self.scaler.transform(
            recent_data['item_quotation_price_num'].values.reshape((-1, 1))).flatten()

        logger.info('training model...')
        self.build_model_mse(train_x_inputs.shape[1])
        self.train_history = self.model.fit(train_x_inputs, train_y_scaled,
                                            validation_data=(
                                                valid_x_inputs, valid_y_scaled),
                                            epochs=100, verbose=1, batch_size=1024,
                                            callbacks=mobile_model_config.model_callbacks)
        # weighted fine-tuning on recent data (disabled)
        # self.model.fit(recent_x_inputs, recent_y_scaled,
        #                epochs=5, verbose=0, batch_size=512)

        # self.product_ohe.fit(x_train[['product_name']])

    def train_model(self):
        """
        Train the price model on the full dataset with recency weighting.

        Pipeline: drop rows with a null product_name, shuffle, attach a
        per-row recency weight (weight_function), then oversample recent
        rows three times over — once for the RECENT_DAYS window, once for
        the last 2 days, once for the last day (each window repeated x3
        via repeatData). Fits build_model_mse on one-hot features with
        sample weights, then evaluates on the RECENT_DAYS window.

        NOTE(review): self.model_mse is assigned self.model (same object),
        so test_mape_mse duplicates test_mape. `self.model_data=self.model_data`
        is a no-op left from a disabled filter. DataFrame.append is
        deprecated in modern pandas (use pd.concat). The final except
        swallows errors and logs at info level.
        :return:
        """
        if self.model_data is None:
            logger.critical('model_data is None!')
            return

        # recency window cut-offs
        recent_date = self.model_date - \
            datetime.timedelta(days=mobile_model_config.RECENT_DAYS)
        recent2_date = self.model_date - \
                      datetime.timedelta(days=2)

        recent1_date = self.model_date - \
                      datetime.timedelta(days=1)

        self.model_data = self.model_data.reset_index(drop=True)
        # drop rows with no product name (logged for visibility)
        dfempty=self.model_data.loc[self.model_data.product_name.isnull()]
        logger.info('df empty datasize={}'.format(dfempty.shape[0]))
        self.model_data = self.model_data.loc[~self.model_data.product_name.isnull()]
        self.model_data = self.shuffle()
        self.model_data = self.model_data.reset_index(drop=True)
        # per-row recency weight in [1/3, 14]
        self.model_data['weight'] = self.model_data['settle_list_create_date'].apply(self.weight_function)
        max_weight=self.model_data['weight'].max()
        min_weight=self.model_data['weight'].min()
        # self.model_data['weight']=1
        recent_data = self.model_data[self.model_data['settle_list_create_date'] >= recent_date]
        # keep an untouched copy of the recent window for evaluation later
        recent_data_valid = recent_data.copy()
        # oversample the RECENT_DAYS window (x3)
        repeatDf=repeatData(recent_data,size=3)
        self.model_data=self.model_data.append(repeatDf,ignore_index=True)
        self.model_data = self.model_data.reset_index(drop=True)

        # oversample the last-2-days window again (x3)
        recent_data = recent_data[recent_data['settle_list_create_date'] >= recent2_date]
        repeatDf=repeatData(recent_data,size=3)
        self.model_data = self.model_data.append(repeatDf, ignore_index=True)
        self.model_data = self.model_data.reset_index(drop=True)

        # oversample the last-day window again (x3)
        recent_data = recent_data[recent_data['settle_list_create_date'] >= recent1_date]
        repeatDf = repeatData(recent_data, size=3)
        self.model_data = self.model_data.append(repeatDf, ignore_index=True)
        self.model_data = self.model_data.reset_index(drop=True)
        # restore the full recent window for evaluation
        recent_data = recent_data_valid.copy()
        logger.info('train mobilie datasize={},max_weight={};min_weight={}'.format(self.model_data.shape[0],max_weight,min_weight))
        # self.model_data=self.model_data[self.model_data['settle_list_create_date'] < recent_date]
        # NOTE(review): no-op assignment, remnant of the disabled filter above
        self.model_data=self.model_data
        # x_train, x_valid, y_train, y_valid = train_test_split(self.model_data.drop(columns='item_quotation_price_num'),
        #                                                       self.model_data['item_quotation_price_num'],
        #                                                       test_size=0.2)
        x_train = self.model_data.drop(columns='item_quotation_price_num')

        train_x_inputs = self.ohe.fit_transform(
            x_train[mobile_model_config.MOBILE_FEATURES])
        # valid_x_inputs = self.ohe.transform(
        #     x_valid[mobile_model_config.MOBILE_FEATURES])
        recent_x_inputs = self.ohe.transform(

            recent_data[mobile_model_config.MOBILE_FEATURES])
        # target left unscaled here (unlike train_model_old)
        train_y_scaled = self.model_data['item_quotation_price_num']
        # train_y_scaled = self.scaler.fit_transform(
        #     y_train.values.reshape((-1, 1))).flatten()
        # valid_y_scaled = self.scaler.transform(
        #     y_valid.values.reshape((-1, 1))).flatten()
        recent_y_scaled=recent_data['item_quotation_price_num']
        # recent_y_scaled = self.scaler.transform(
        #     recent_data['item_quotation_price_num'].values.reshape((-1, 1))).flatten()

        logger.info('training model...')
        # self.build_model_mse(train_x_inputs.shape[1])
        # self.wd_build_model_mse(train_x_inputs.shape[1])
        self.build_model_mse(train_x_inputs.shape[1])
        self.train_history = self.model.fit(train_x_inputs, train_y_scaled,
                                            epochs=100, verbose=1, batch_size=1024,
                                            callbacks=mobile_model_config.model_callbacks,
                                            sample_weight=np.array(x_train['weight']),use_multiprocessing=True,workers=10)
        # weighted fine-tuning on recent data (disabled)
        # self.model.fit(recent_x_inputs, recent_y_scaled,
        #                epochs=5, verbose=0, batch_size=512)
        self.model_mse=self.model
        #self.product_ohe.fit(x_train[['product_name']])
        # weighted evaluation on the recent window; consumed by evaluate_model
        self.score = self.model.evaluate(recent_x_inputs, recent_y_scaled, verbose=1,
                                    sample_weight=np.array(recent_data['weight']))
        test_mape = -1
        test_mape_mse = -1
        try:

            recent_predict_data=self.model.predict(recent_x_inputs)
            recent_predict_data_mse=self.model_mse.predict(recent_x_inputs)
            test_mape= getMape(recent_predict_data,recent_y_scaled)
            test_mape_mse=getMape(recent_predict_data_mse,recent_y_scaled)

            logger.info('history mape_model@{},score@{} '.format( test_mape,self.score))


        except Exception as e:
            logger.info('test_mape is exception!')

    def evaluate_model(self):
        """
        Persist the latest training metrics (epoch, train/val RMSE, sample
        count) into mobile_price_model_evaluation.
        :return:
        """
        if self.train_history is None:
            logger.critical('train_history is None!')
            return

        last_epoch = self.train_history.epoch[-1]
        # RMSE from the final training MSE and from the recent-window score
        # produced by train_model (targets are unscaled, so no variance factor)
        rmse_train = np.round(
            np.sqrt(self.train_history.history['mean_squared_error'][-1]), 4)
        rmse_val = np.round(np.sqrt(self.score[-1]), 4)
        sample_count = self.train_history.params['samples']

        logger.info('saving model evaluation...')
        evaluation_sql = """
        REPLACE INTO mobile_price_model_evaluation(model_date, epoch, train_loss, val_loss, data_size)
        VALUES(%s, %s, %s, %s, %s)
        """
        row = (format_date_string(self.model_date), last_epoch,
               float(rmse_train), float(rmse_val), sample_count)
        mysql_prediction_processor.execute_insert_sql(evaluation_sql, [row])

    def load_models(self):
        """
        Load the trained Keras model and its pickled preprocessors
        (feature OHE, scaler, product OHE) from MODEL_DIR.
        :return:
        """
        logger.info('loading models...')

        model_file = os.path.join(MODEL_DIR, mobile_model_config.MODEL_FILE_NAME)
        # must load inside the module-level graph/session (TF1 semantics)
        with graph.as_default():
            set_session(sess)
            custom = {
                'custom_mean_absolute_percentage': custom_mean_absolute_percentage,
                'leaky_relu': tf.nn.leaky_relu,
            }
            self.model = keras.models.load_model(model_file, custom_objects=custom)

        def _unpickle(file_name):
            # load one pickled preprocessor from MODEL_DIR
            with open(os.path.join(MODEL_DIR, file_name), 'rb') as f:
                return pickle.load(f)

        self.ohe = _unpickle(mobile_model_config.MOBILE_OHE_NAME)
        self.scaler = _unpickle(mobile_model_config.MOBILE_SCALER_NAME)
        self.product_ohe = _unpickle(mobile_model_config.MOBILE_PRODUCT_OHE_NAME)

        logger.info('loading models done')

    def save_model(self):
        """
        Persist the Keras model and its preprocessors into MODEL_DIR,
        checking each target file for conflicts first.
        :return:
        """
        if self.model is None:
            logger.critical('model is None!')
            return

        logger.info('saving model...')

        check_conflict_file(mobile_model_config.MODEL_DIR,
                            mobile_model_config.MODEL_FILE_NAME)
        self.model.save(os.path.join(mobile_model_config.MODEL_DIR,
                                     mobile_model_config.MODEL_FILE_NAME))

        # pickle each preprocessor next to the model file
        preprocessors = (
            (mobile_model_config.MOBILE_OHE_NAME, self.ohe),
            (mobile_model_config.MOBILE_SCALER_NAME, self.scaler),
            (mobile_model_config.MOBILE_PRODUCT_OHE_NAME, self.product_ohe),
        )
        for file_name, obj in preprocessors:
            check_conflict_file(mobile_model_config.MODEL_DIR, file_name)
            with open(os.path.join(mobile_model_config.MODEL_DIR, file_name), 'wb') as f:
                pickle.dump(obj, f)

    def push_model(self):
        """
        scp the model and preprocessor files to every prediction server's
        models/mobile directory.
        :return:
        """
        logger.info(
            'pushing models to remote server model_date@{}'.format(self.model_date))
        if mobile_model_config.MODEL_PUSH_SERVERS[0] == 'no_server':
            logger.info('No server need to push, SKIP!')
            return

        artifacts = (mobile_model_config.MODEL_FILE_NAME,
                     mobile_model_config.MOBILE_OHE_NAME,
                     mobile_model_config.MOBILE_SCALER_NAME,
                     mobile_model_config.MOBILE_PRODUCT_OHE_NAME)
        for ip in mobile_model_config.MODEL_PUSH_SERVERS:
            logger.info('push server to server@{}'.format(ip))
            for artifact in artifacts:
                push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models/mobile/'. \
                    format(mobile_model_config.MODEL_DIR, artifact, ip)
                os.system(push_cmd)
            time.sleep(1)

    def push_gc_model(self):
        """
        scp the model and preprocessor files to every prediction server's
        models_gc/mobile directory (the GC model slot).
        :return:
        """
        logger.info(
            'pushing models to remote server model_date@{}'.format(self.model_date))
        if mobile_model_config.MODEL_PUSH_SERVERS[0] == 'no_server':
            logger.info('No server need to push, SKIP!')
            return

        artifacts = (mobile_model_config.MODEL_FILE_NAME,
                     mobile_model_config.MOBILE_OHE_NAME,
                     mobile_model_config.MOBILE_SCALER_NAME,
                     mobile_model_config.MOBILE_PRODUCT_OHE_NAME)
        for ip in mobile_model_config.MODEL_PUSH_SERVERS:
            logger.info('push server to server@{}'.format(ip))
            for artifact in artifacts:
                push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models_gc/mobile/'. \
                    format(mobile_model_config.MODEL_DIR, artifact, ip)
                os.system(push_cmd)
            time.sleep(1)

    def push_reload_model_message(self):
        """
        Ask every prediction server to reload the freshly pushed model.

        Per server: remove its health_check file (taking it out of the
        load balancer), wait 60s for in-flight traffic to drain, POST a
        reload message to each serving port (retrying up to 3 times),
        then restore the health_check file.

        NOTE(review): on repeated failure the `break` only exits the port
        loop — the outer loop continues and the health_check file is still
        restored for that server; confirm this is intended rather than
        leaving a failed server out of rotation.
        :return:
        """
        logger.info(
            'pushing reload model message model_date@{}'.format(self.model_date))
        exist_server = mobile_model_config.MODEL_PUSH_SERVERS[0]
        if exist_server == 'no_server':
            logger.info('No server need to push, SKIP!')
            return

        # category 1 = mobile; reload flag tells the API to re-read model files
        params = {"category": 1, "reload": 1,"data":[[]]}
        base_url = 'http://{}:{}/price/predict'
        for ip in mobile_model_config.MODEL_PUSH_SERVERS:
            # take the server out of the load balancer before reloading
            rm_cmd = 'ssh root@{} rm -f /data/thy/price_model/api_health/health_check'.format(
                ip)
            os.system(rm_cmd)
            time.sleep(60)
            for port in mobile_model_config.MODEL_SERVER_PORTS:
                request_url = base_url.format(ip, port)
                logger.info('push reload message to {}'.format(request_url))
                # response = requests.post(request_url, json=params)
                # results = json.loads(response.content)
                results = {'code': -1}
                # retry the reload POST up to 3 times
                for i in range(1, 4):
                    response = requests.post(request_url, json=params)
                    results = json.loads(response.content)
                    time.sleep(5)
                    if results['code'] == 0:
                        logger.info('push reload message success @time {} '.format(i))
                        break
                    else:
                        logger.info('push reload message fail {} times '.format(i))
                if results['code'] != 0:
                    # stop pushing to this server's remaining ports on failure
                    logger.critical(
                        'push reloading message error! {}:{}'.format(ip, port))
                    break
                time.sleep(5)
            time.sleep(30)
            # put the server back into the load balancer
            touch_cmd = 'ssh root@{} touch /data/thy/price_model/api_health/health_check'.format(
                ip)
            os.system(touch_cmd)
            time.sleep(10)

    def clear_api_cache(self):
        """
        Hit the ab-price cache-clear endpoint; alert via Feishu on failure.
        :return:
        """
        if self.ab_price_api_url is None:
            logger.info('ab_price_api_url is None, SKIP!')
            return
        try:
            response = requests.get(self.ab_price_api_url)
            if response.status_code != 200:
                logger.critical('手机清除abprice缓存失败!')
                feishu_messager.send_message('手机清除abprice缓存失败!')
        except Exception as e:
            logger.critical('手机调用清除abprice缓存接口异常：{}'.format(e))
            feishu_messager.send_message(
                '手机调用清除abprice缓存接口异常：{}'.format(e))

    def clear_models(self):
        """
        Delete model artifacts (*.pkl / *.h5) in MODEL_DIR that were
        created more than KEEP_MODEL_DAYS days ago.
        :return:
        """
        logger.info('clearing model files {}'.format(self.model_date))
        cutoff_ts = (get_today() - datetime.timedelta(
            days=mobile_model_config.KEEP_MODEL_DAYS)).timestamp()
        for file in os.listdir(mobile_model_config.MODEL_DIR):
            if not file.endswith(('pkl', 'h5')):
                continue
            file_path = os.path.join(mobile_model_config.MODEL_DIR, file)
            if os.path.getctime(file_path) < cutoff_ts:
                logger.info('removing file@{}'.format(file))
                os.remove(file_path)

    def cal_history_avg_price(self, to_redis=False):
        """
        Compute the per-SKU/level average predicted price over the last
        MOBILE_HISTORY_AVG_PRICE_DAYS days; optionally cache it in Redis.
        :param to_redis: when True, also bulk-write the averages to Redis
        :return:
        """
        logger.info('calculate history avg price...')
        history_price_date = self.model_date - datetime.timedelta(
            days=mobile_model_config.MOBILE_HISTORY_AVG_PRICE_DAYS)
        history_avg_price_sql = """
        SELECT product_sku_key, product_level_key, avg(predict_origin) AS avg_price
        from ods.ods_price_prediction_price_prediction 
        WHERE product_category_id = 1 AND cast(predict_date  as date) between cast('{}'  as date) AND cast('{}'  as date)
         and predict_origin>0
        GROUP BY product_sku_key, product_level_key
        """.format(history_price_date.strftime('%Y-%m-%d'), self.model_date.strftime('%Y-%m-%d'))
        self.history_avg_price = presto_processor.load_sql(history_avg_price_sql)

        if not to_redis:
            return
        logger.info('saving mobile history price into redis')
        avg_price = self.history_avg_price
        # redis key: <prefix><sku>_<level>
        avg_price.index = (mobile_model_config.MOBILE_HISTORY_AVG_PRICE_PREFIX
                           + avg_price['product_sku_key'].astype(str)
                           + '_' + avg_price['product_level_key'].astype(str))
        self.insertInRedisByPipline(avg_price['avg_price'].to_dict(), 10000)
        logger.info('saving mobile history price into redis done!')

    def insertInRedisByPipline(self, data, batchSize=10000):
        """
        Bulk-write a dict into Redis via a pipeline, flushing every
        ``batchSize`` SET commands. Errors on a flush are logged and
        skipped (best-effort cache fill).

        :param data: mapping of redis key -> value
        :param batchSize: number of SET commands buffered per flush
        :return:
        """
        i = 0
        pipe = redis_conn.pipeline()
        for k, v in data.items():
            i += 1
            pipe.set(k, v, ex=mobile_model_config.MOBILE_HISTORY_CACHE_TIME)
            if i % batchSize == 0:
                try:
                    pipe.execute()
                except Exception:
                    logger.error('pipline save into redis size i={} has error'.format(i))
                logger.info('pipline save into redis size i={}'.format(i))
        # flush the final partial batch; guard it like the in-loop flushes
        # (previously an error here propagated and aborted the caller,
        # inconsistent with the best-effort handling above)
        try:
            pipe.execute()
        except Exception:
            logger.error('pipline save into redis size i={} has error'.format(i))

    def process_complement_data(self):
        """
        Build the prediction-input ("complement") dataset into self.predict_data.

        Steps: load raw SKU property rows, pivot them to one row per SKU,
        join product / level / brand / base-price metadata (cartesian product
        of SKUs x levels), drop excluded SKUs, then de-duplicate on
        (product_sku_id, product_level_id).
        :return: None
        """
        logger.info('processing complement data...')

        data = self.__load_complement_data()
        baseprice = self.__load_baseprice_data()
        excude_sku=self.load_sku_exclude()
        # (debug) dump raw data to pickle:
        #complement_pkl = open('complement.pkl', 'wb')
        #pickle.dump(data, complement_pkl)
        #complement_pkl.close()
        # (debug) reload raw data from pickle:
        # complement_pkl = open('complement.pkl', 'rb')
        # data = pickle.load(complement_pkl)

        # Normalize property names/values, keep the latest duplicate per
        # (sku, property), then pivot so each property becomes a column.
        data['property'] = data['property'].map(lambda x: x.strip())
        data['property_value'] = data['property_value'].map(lambda x: x.strip())
        data.drop_duplicates(subset=['product_sku_id', 'property'], keep='last', inplace=True)
        data = data.pivot_table(index='product_sku_id', columns='property', values='property_value',
                                aggfunc=lambda x: x).reset_index()
        data.fillna('unknown', inplace=True)

        product_id_data = self.__load_product_data()
        data = pd.merge(data, product_id_data, on='product_sku_id')

        # Every row is a mobile phone (category id 1).
        data['product_category_id'] = 1
        data['product_category_name'] = '手机'

        # (superseded by preprocess_model_data) manual memory/storage split:
        # memory_storage_split = data['memory'].str.split('+', expand=True)
        # if memory_storage_split.shape[1] == 1:
        #     # only one field produced by the split -> pad a second one
        #     memory_storage_split.columns = ['memory_tmp']
        #     memory_storage_split['storage_tmp'] = np.nan
        # else:
        #     memory_storage_split.columns = ['memory_tmp', 'storage_tmp']
        # data = pd.concat([data, memory_storage_split], axis=1, sort=False)
        # data['memory'] = data['memory_tmp']
        # data['storage'] = data['storage'].where(pd.isnull(data['storage_tmp']), data['storage_tmp'])
        #
        # data.loc[data['product_brand_name'] == '苹果', 'memory'] = 'apple_memory'
        # data['color'] = np.where(data['color'] != 'unknown', data['product_name'] + '_X_' + data['color'], 'unknown')
        # data['period'] = '0'
        data = preprocess_model_data(data,is_cal_period=False)
        levels = self.__load_product_levels()
        brands = self.__load_product_brand()
        # Cartesian product of SKUs x levels via a constant join key.
        data['_tmp_key'] = 0
        levels['_tmp_key'] = 0

        data = pd.merge(data, levels, on='_tmp_key', how='outer').drop(columns='_tmp_key')
        self.predict_data = pd.merge(data, brands, on='product_brand_name', how='left')
        self.predict_data.product_brand_id.fillna(-1, inplace=True)
        template_level_mapping = self.__load_template_level_mapping()
        self.predict_data = pd.merge(self.predict_data, template_level_mapping,
                                      on=['secondary_level_template_id', 'product_level_id'])
        self.predict_data = pd.merge(self.predict_data, baseprice,
                                     on=['product_sku_id', 'product_level_id'],how='inner')

        # (debug) restrict to a single SKU:
        # self.predict_data = self.predict_data.loc[self.predict_data.product_sku_id == 1169415]
        self.predict_data = self.predict_data.loc[~self.predict_data.product_sku_id.isin(excude_sku)]

        self.predict_data['date'] = self.model_date.strftime('%Y-%m-%d')
        self.predict_data.drop_duplicates(subset=['product_sku_id', 'product_level_id'], keep='first', inplace=True)

    def load_sku_exclude(self):
        """
        Load the SKU ids that must be excluded from prediction.

        :return: tuple of excluded SKU identifiers; always has at least two
            elements (padded with placeholders) so a single value never
            renders as the one-element tuple form "(x,)" in SQL.
        """
        logger.info('loading exclude sku :')
        sku_detection = presto_processor.load_sql(EXCLUDE_SKU_SQL)
        if sku_detection.empty:
            return ('no1', 'no1')
        # First column of each row is the SKU identifier.
        excluded = tuple(row[0] for row in sku_detection.itertuples(index=False))
        if len(excluded) == 1:
            excluded = excluded + (-1,)
        return excluded

    def __load_complement_data(self):
        """
        Fetch the raw SKU property rows used to build the complement dataset.

        :return: DataFrame produced by COMPLETE_DATA_SQL
        """
        complement = presto_processor.load_sql(COMPLETE_DATA_SQL)
        return complement

    def __load_baseprice_data(self):
        """
        Load base-price rows for mobile SKUs from MySQL.

        :return: DataFrame produced by BASEPRICE_MOBILE_SQL
        """
        return mysql_prediction_processor.load_sql(BASEPRICE_MOBILE_SQL)

    def __load_product_data(self):
        """
        Load product id metadata.

        Both level-template id columns are cast to str so they can be merged
        against the (str-typed) level-template mapping.
        :return: DataFrame of product metadata
        """
        product_data = presto_processor.load_sql(PRODUCT_SQL)
        for col in ('product_level_template_id', 'secondary_level_template_id'):
            product_data[col] = product_data[col].astype(str)
        return product_data

    def __load_product_levels(self):
        """
        Load the product level table.

        :return: DataFrame produced by LEVEL_SQL
        """
        levels = presto_processor.load_sql(LEVEL_SQL)
        return levels

    def __load_product_brand(self):
        """
        Load the brand id/name mapping.

        (Original docstring said "load levels" — this actually loads brands.)
        :return: DataFrame produced by BRAND_ID_NAME_SQL
        """
        brands = presto_processor.load_sql(BRAND_ID_NAME_SQL)
        return brands

    def __load_template_level_mapping(self):
        """
        Load the mapping between secondary level templates and product levels.

        :return: DataFrame with secondary_level_template_id as str and
            product_level_id as int, ready for merging.
        """
        mapping = presto_processor.load_sql(LEVEL_TEMPLATE_MAPPING_SQL)
        mapping = mapping.astype({'secondary_level_template_id': str,
                                  'product_level_id': int})
        return mapping

    def predict_complement_data_old(self):
        """
        Legacy prediction path for the complement dataset.

        Same overall pipeline as predict_complement_data, but inverse-
        transforms the scaled model output through self.scaler and applies a
        flat minimum-price floor of 5. Kept for reference; not called by
        launch_model.
        :return: None
        """
        if self.predict_data is None:
            logger.critical('predict_data is None!')
            return
        if self.model is None:
            self.load_models()

        logger.info('predicting data...')

        self.predict_data['period'] = '0'
        self.predict_data['small_version'] = self.predict_data['small_version'].astype(str)
        test_x_inputs = self.ohe.transform(self.predict_data[mobile_model_config.MOBILE_FEATURES])
        t1 = time.time()
        predict_scaled = self.model.predict(test_x_inputs)
        logger.info('predicting data... done！')
        # (debug) dump predictions to pickle:
        # predict_pkl = open('predict_scaled.pkl', 'wb')
        # pickle.dump(predict_scaled, predict_pkl)
        # predict_pkl.close()
        # (debug) reload predictions from pickle:
        # predict_pkl = open('predict_scaled.pkl', 'rb')
        # predict_scaled = pickle.load(predict_pkl)

        # Undo the target scaling applied at training time.
        self.predict_data['forecast_reference_price'] = np.round(
            self.scaler.inverse_transform(predict_scaled).flatten()).astype(int)
        self.predict_data['predict_origin'] = self.predict_data['forecast_reference_price']
        # Weighted blend of model output with the historical average price.
        self.predict_data['key'] = mobile_model_config.MOBILE_HISTORY_AVG_PRICE_PREFIX + \
                                   self.predict_data['product_sku_id'].astype(str) + '_' + self.predict_data[
                                       'product_level_id'].astype(str)

        self.history_avg_price = self.history_avg_price.rename(
            columns={'product_sku_key': 'product_sku_id', 'product_level_key': 'product_level_id',
                     'avg_price': 'history_avg_price'})
        self.predict_data = pd.merge(self.predict_data, self.history_avg_price,
                                     on=['product_sku_id', 'product_level_id'], how='left')
        # SKUs without history fall back to the pure model prediction.
        self.predict_data['history_avg_price'] = self.predict_data['history_avg_price'].where(
            self.predict_data['history_avg_price'].notnull(), self.predict_data['forecast_reference_price'])
        self.predict_data['forecast_reference_price'] = self.predict_data[
                                                            'forecast_reference_price'] * MODEL_PRICE_WEIGHT + \
                                                        self.predict_data[
                                                            'history_avg_price'] * HISTORY_AVG_PRICE_WEIGHT

        # A product unseen by the training encoder one-hot-sums to 0 -> new product.
        self.predict_data['is_new_product'] = self.product_ohe.transform(
            self.predict_data[['product_name']]).sum(axis=1).astype(int)
        self.predict_data['is_new_product'] = self.predict_data['is_new_product'].replace({0: 1, 1: 0})

        # Cap environment-protection template prices at MAX_EVN_PRICE.
        # env_protect = (self.predict_data['product_level_template_id'] == mobile_model_config.EVN_TEMPLATE_ID) \
        #               & (self.predict_data['is_new_product'] == 1)
        env_protect = self.predict_data['product_level_template_id'] == mobile_model_config.EVN_TEMPLATE_ID
        self.predict_data.loc[env_protect, 'forecast_reference_price'] = self.predict_data.loc[
            env_protect, 'forecast_reference_price'].where(
            self.predict_data.loc[env_protect, 'forecast_reference_price'] <= MAX_EVN_PRICE, MAX_EVN_PRICE)

        # Enforce a minimum price of 5.
        self.predict_data['forecast_reference_price'] = self.predict_data['forecast_reference_price'].where(
            self.predict_data['forecast_reference_price'] >= 5, 5)
        self.predict_data['lastprice'] = self.predict_data['forecast_reference_price']
        self.predict_data['y_pred'] = self.predict_data['forecast_reference_price']
        logger.info('predicting data done')


    def predict_complement_data(self):
        """
        Predict prices for the complement dataset held in self.predict_data.

        Runs the loaded model over the one-hot encoded features, blends the
        model output with the historical average price, caps environment-
        protection template prices at MAX_EVN_PRICE, and enforces a minimum
        price floor. Results are written back onto self.predict_data
        ('forecast_reference_price', 'predict_origin', 'lastprice', ...).
        :return: None
        """
        if self.predict_data is None:
            logger.critical('predict_data is None!')
            return
        if self.model is None:
            self.load_models()

        logger.info('predicting data...')

        self.predict_data['period'] = '0'
        self.predict_data['small_version'] = self.predict_data['small_version'].astype(str)
        test_x_inputs = self.ohe.transform(self.predict_data[mobile_model_config.MOBILE_FEATURES])
        predict_scaled = self.model.predict(test_x_inputs)
        logger.info('predicting data... done！')

        # BUG FIX: ndarray.flatten(-1) raises TypeError (flatten expects an
        # order string like 'C'); reshape(-1) is the intended flattening of
        # the (n, 1) model output.
        self.predict_data['forecast_reference_price'] = np.round(
            predict_scaled.reshape(-1)).astype(int)
        self.predict_data['predict_origin'] = self.predict_data['forecast_reference_price']
        # Weighted blend of the model output with the historical average price.
        self.predict_data['key'] = mobile_model_config.MOBILE_HISTORY_AVG_PRICE_PREFIX + \
            self.predict_data['product_sku_id'].astype(str) + '_' + \
            self.predict_data['product_level_id'].astype(str)

        self.history_avg_price = self.history_avg_price.rename(
            columns={'product_sku_key': 'product_sku_id',
                     'product_level_key': 'product_level_id',
                     'avg_price': 'history_avg_price'})
        self.predict_data = pd.merge(self.predict_data, self.history_avg_price,
                                     on=['product_sku_id', 'product_level_id'], how='left')
        # SKUs without history fall back to the pure model prediction.
        self.predict_data['history_avg_price'] = self.predict_data['history_avg_price'].where(
            self.predict_data['history_avg_price'].notnull(),
            self.predict_data['forecast_reference_price'])
        self.predict_data['forecast_reference_price'] = \
            self.predict_data['forecast_reference_price'] * MODEL_PRICE_WEIGHT + \
            self.predict_data['history_avg_price'] * HISTORY_AVG_PRICE_WEIGHT

        # A product unseen by the training encoder one-hot-sums to 0 -> new product.
        self.predict_data['is_new_product'] = self.product_ohe.transform(
            self.predict_data[['product_name']]).sum(axis=1).astype(int)
        self.predict_data['is_new_product'] = self.predict_data['is_new_product'].replace({0: 1, 1: 0})

        # Cap environment-protection template prices at MAX_EVN_PRICE.
        env_protect = self.predict_data['product_level_template_id'] == mobile_model_config.EVN_TEMPLATE_ID
        self.predict_data.loc[env_protect, 'forecast_reference_price'] = self.predict_data.loc[
            env_protect, 'forecast_reference_price'].where(
            self.predict_data.loc[env_protect, 'forecast_reference_price'] <= MAX_EVN_PRICE, MAX_EVN_PRICE)

        # Minimum-price handling: prefer the sale price when the model output
        # collapses below 5 but a valid sale price exists, then floor at 5.
        too_low = (self.predict_data['forecast_reference_price'] < 5) & (self.predict_data['saleprice'] > 5)
        self.predict_data.loc[too_low, 'forecast_reference_price'] = \
            self.predict_data.loc[too_low, 'saleprice']
        self.predict_data['forecast_reference_price'] = self.predict_data['forecast_reference_price'].where(
            self.predict_data['forecast_reference_price'] >= 5, 5)
        self.predict_data['lastprice'] = self.predict_data['forecast_reference_price']

        # Environment-protection rows with a positive sale price use the sale
        # price directly.
        # BUG FIX: '&' binds tighter than '>', so the original expression
        # `env_protect & self.predict_data.saleprice > 0` evaluated as
        # `(env_protect & saleprice) > 0`; the comparison must be parenthesized.
        env_with_sale = env_protect & (self.predict_data.saleprice > 0)
        self.predict_data.loc[env_with_sale, 'lastprice'] = \
            self.predict_data.loc[env_with_sale, 'saleprice']
        self.predict_data.loc[env_with_sale, 'forecast_reference_price'] = \
            self.predict_data.loc[env_with_sale, 'saleprice']
        logger.info('predicting data done')

    def save_complement_data(self):
        """
        Persist the complement predictions to MySQL.

        Deletes any existing rows for tomorrow's date in
        price_prediction_mobile_bi_price, then bulk-inserts the current
        predictions; records the row count on self.mysql_price_num.
        :return: None
        """
        if self.predict_data is None:
            return
        logger.info('saving predict data...')
        # Predictions are written for tomorrow's date.
        tomorrow = datetime.datetime.strptime(time.strftime('%Y-%m-%d'), '%Y-%m-%d') + datetime.timedelta(days=1)
        target_date = tomorrow.strftime('%Y-%m-%d')
        self.predict_data['date'] = target_date

        delete_sql = """ delete from price_prediction_mobile_bi_price where date=date '{}'""".format(target_date)
        mysql_prediction_processor.execute_sql(delete_sql)

        insert_sql = """
            INSERT INTO price_prediction_mobile_bi_price(date, product_sku_key, product_sku_name, product_level_key, 
            product_level_name, product_key, product_name, product_category_id, product_category_name, product_brand_id,
            product_brand_name, predict_origin, forecast_reference_price,is_new_product,POLY_pred_price)
            VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,  %s,%s, %s, %s)
            """
        columns = ['date', 'product_sku_id', 'product_sku_name', 'product_level_id',
                   'product_level_name', 'product_id', 'product_name',
                   'product_category_id', 'product_category_name', 'product_brand_id',
                   'product_brand_name', 'predict_origin', 'forecast_reference_price',
                   'is_new_product', 'y_pred']
        rows = self.predict_data[columns].to_records(index=False).tolist()
        mysql_prediction_processor.execute_insert_sql(insert_sql, rows)
        logger.info('saving predict data to mysql done')
        self.mysql_price_num = len(self.predict_data)

    def save_complement_data_old(self):
        """
        Legacy persistence path: bulk-insert the complement predictions into
        the test table price_prediction_c_sku_mobile_test (no prior delete).
        Kept for reference; not called by launch_model.
        :return: None
        """
        if self.predict_data is None:
            return
        logger.info('saving predict data...')
        # Predictions are written for tomorrow's date.
        tomorrow = datetime.datetime.strptime(time.strftime('%Y-%m-%d'), '%Y-%m-%d') + datetime.timedelta(days=1)
        self.predict_data['date'] = tomorrow.strftime('%Y-%m-%d')

        insert_sql = """
            INSERT INTO price_prediction_c_sku_mobile_test(date, product_sku_key, product_sku_name, product_level_key, 
            product_level_name, product_key, product_name, product_category_id, product_category_name, product_brand_id,
            product_brand_name, predict_origin, forecast_reference_price,is_new_product,POLY_pred_price)
            VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,  %s,%s, %s, %s)
            """
        columns = ['date', 'product_sku_id', 'product_sku_name', 'product_level_id',
                   'product_level_name', 'product_id', 'product_name',
                   'product_category_id', 'product_category_name', 'product_brand_id',
                   'product_brand_name', 'predict_origin', 'forecast_reference_price',
                   'is_new_product', 'y_pred']
        rows = self.predict_data[columns].to_records(index=False).tolist()
        mysql_prediction_processor.execute_insert_sql(insert_sql, rows)
        logger.info('saving predict data to mysql done')
        self.mysql_price_num = len(self.predict_data)


    def _cal_Performance(self, y_true, y_pred, name_space=('mae', 'mse', 'explained_variance_score', 'r2_score')):
        """
        Compute regression-quality metrics for a set of predictions.

        :param y_true: ground-truth target values
        :param y_pred: model-predicted values
        :param name_space: iterable of metric names to compute; any subset of
            {'mae', 'mse', 'explained_variance_score', 'r2_score'}
        :return: dict mapping each requested metric name to its score

        Metric notes:
        - mae: mean absolute error; lower is better.
        - mse: mean squared error; lower is better.
        - explained_variance_score: in [0, 1]; closer to 1 means the model
          explains more of the target variance.
        - r2_score: coefficient of determination; closer to 1 is better.
        """
        from sklearn.metrics import mean_absolute_error, mean_squared_error, explained_variance_score, r2_score
        # NOTE: the default was a mutable list — replaced with an equivalent
        # tuple (same contents, membership tests unchanged) to avoid the
        # shared-mutable-default pitfall. Callers are unaffected.
        metric_funcs = {'mae': mean_absolute_error, 'mse': mean_squared_error,
                        'explained_variance_score': explained_variance_score, 'r2_score': r2_score}
        return {name: func(y_true, y_pred)
                for name, func in metric_funcs.items() if name in name_space}

    def smooth_strategy(self, use_template=False, sw_balance=True, balance_rule='stat', N_Degree=10):
        """
        Apply the intelligent smoothing post-process to self.predict_data.

        NOTE(review): use_template / sw_balance / balance_rule / N_Degree are
        accepted but never forwarded to the smoother — confirm whether that
        is intentional.
        """
        snapshot = self.predict_data.copy(deep=True)
        smoother = intelligent_data_smooth(snapshot, snapshot)
        smoother.smooth_strategy()
        self.predict_data = smoother.predict_data
        logger.info('loading smooth model data end!')



    def launch_model(self):
        """
        Run the full daily model pipeline, in order: load training data,
        train and evaluate the model, persist it, compute historical average
        prices, reload models, build the complement dataset, predict prices,
        and save the results to MySQL.
        :return: None
        """
        self.load_model_data()
        # (disabled) anomaly handling:
        # self.eliminate_anomaly()
        # self.detect_anomaly()
        # # After same-day anomalies are processed, call eliminate again to
        # # drop today's anomalies as well.
        # self.eliminate_anomaly()
        self.train_model()
        self.evaluate_model()
        self.save_model()
        self.cal_history_avg_price()
        self.load_models()
        self.process_complement_data()
        self.predict_complement_data()
        # self.predict_complement_data_old()
        # self.smooth_strategy()
        # y_pred feeds the POLY_pred_price column written by save_complement_data.
        self.predict_data['y_pred']=self.predict_data['forecast_reference_price']

        self.save_complement_data()
        # self.save_complement_data_old()

        #self.push_model()
        #self.push_reload_model_message()
        #self.clear_api_cache()
        #self.clear_models()
