#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   laptop_train_model.py    
@Contact :   pengwei.sun@aihuishou.com
@License :   (C)Copyright aihuishou

@Modify Time      @Author       @Version    @Description
------------      -----------   --------    -----------
2021-11-26 18:30   pengwei.sun      1.0         None
'''

import datetime
import json
import os
import pickle
import time
import datetime
import os
import sys
import math
sys.path.append(os.getcwd())
import numpy as np
import pandas as pd
import requests
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from tensorflow.python.keras import regularizers
from src.laptop.anomaly import anomaly_file_loader as af_loader
from src.laptop.anomaly.anomaly_detect import LaptopAnomalyDetector
from src.laptop.anomaly.anomaly_load import load_anomaly_document_item
# from src.laptop.anomaly.anomaly_trend_detect import LaptopAnomalyTrendDetector
from src.laptop.model import laptop_model_config
from src.laptop.preprocessor.fish_product_data_loader import preprocess_fish_product
from src.laptop.preprocessor.settle_data_loader import preprocess_settle_data
from src.laptop.preprocessor.model_data_preprocessor import preprocess_model_data, random_ratio
from src.utils.config import logger, config
from src.utils.db_processor import mysql_prediction_processor, mysql_price_model, presto_processor
from src.utils.feishu_message import feishu_messager
from src.utils.redis_pool import redis_conn
from src.utils.util import check_conflict_file, get_today, format_date_string
from src.laptop.model.laptop_model_config import MODEL_FEATURES
from src.utils.model_utils import getMape, custom_mean_absolute_percentage
from src.laptop.job.sku_price_complement import COMPLETE_DATA_SQL, LEVEL_SQL, \
    LEVEL_TEMPLATE_MAPPING_SQL, BRAND_ID_NAME_SQL
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from src.laptop.predict import laptop_predict_config

import tensorflow as tf
from tensorflow import keras
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.backend import set_session
from tensorflow.python.ops import math_ops
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials

# TF1-style graph/session setup shared by training (build_model/reloadGraph)
# and inference (load_models).
graph = tf.compat.v1.get_default_graph()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning logs
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin to the first GPU
session_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
session_config.gpu_options.per_process_gpu_memory_fraction = 0.6  # cap at 60% of GPU memory (the original comment claimed 90%, which does not match 0.6)
session_config.gpu_options.allow_growth = True  # grow GPU allocation on demand
sess = tf.compat.v1.Session(config=session_config)

# TensorBoard log directory used by train_model's callback
log_filepath = 'logs/keras_log'
# global hyperopt trial counter, incremented inside train_model
iters = 0
# NOTE(review): sys.float_info.min is the smallest positive float, not
# infinity — the name 'inf' is misleading; confirm intended use before renaming
inf = sys.float_info.min


def reloadGraph():
    """
    Reset the default TF graph and rebuild the module-level graph/session.

    The original implementation only rebound local variables, so the
    module-level ``graph`` and ``sess`` used elsewhere (e.g. load_models)
    were never actually refreshed; they are declared ``global`` here so the
    reset takes effect. ``tf.compat.v1.reset_default_graph`` is used for
    consistency with the rest of the file (``tf.reset_default_graph`` is
    not available under TF2).
    :return: None
    """
    global graph, sess
    tf.compat.v1.reset_default_graph()
    graph = tf.compat.v1.get_default_graph()
    session_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
    # cap GPU memory at 60% of the device and grow allocation on demand
    session_config.gpu_options.per_process_gpu_memory_fraction = 0.6
    session_config.gpu_options.allow_growth = True
    sess = tf.compat.v1.Session(config=session_config)


def repeatData(df, size=3):
    """
    Oversample *df* by drawing (size - 1) * len(df) rows with replacement
    and jittering their quoted price by a random ratio.

    :param df: source DataFrame; must contain ``item_quotation_price_num``
    :param size: total multiplier — the returned frame holds
                 (size - 1) times the original row count
    :return: DataFrame of resampled rows (the original index is preserved
             in an ``index`` column by reset_index)
    """
    base = df.reset_index()
    n_rows = base.shape[0]
    logger.info('dfsize={}'.format(n_rows))
    picks = np.random.randint(0, high=n_rows, size=n_rows * (size - 1))
    for _ in range(7):
        np.random.shuffle(picks)
    sampled = base.loc[picks]
    # random_ratio is a project helper — presumably yields multiplicative
    # noise around 1.0; verify against model_data_preprocessor
    offsets = random_ratio(n_rows * (size - 1))
    sampled.item_quotation_price_num = sampled.item_quotation_price_num * offsets
    return sampled


class LaptopModel:
    """
    笔记本价格模型构建过程
    1. 读取结算数据
    2. 排除异常数据
    3. 检测当日出货物品异常数据
    4. 训练模型
    5. 检测价格趋势异常数据
    6. 处理钓鱼物品
    7. 评估模型
    8. 保存模型
    9. 推送模型
    10. 调用清除缓存接口
    """

    def __init__(self, model_date=None):
        """
        :param model_date: modeling date; defaults to today when omitted
        """
        self.model_date = get_today() if model_date is None else model_date

        self.model_data = None
        self.model = None
        self.train_history = None
        self.mysql_price_num = 0
        # One-hot encoder for the feature columns; categories unseen at
        # fit time are ignored at transform time rather than raising
        self.ohe = OneHotEncoder(handle_unknown='ignore')
        # price standardization (currently disabled)
        # self.scaler = StandardScaler()
        # model files queued for pushing to the serving hosts
        self.push_model_file_list = []
        # endpoint used to clear the pricing API cache
        self.ab_price_api_url = config.get_config(
            'ab_price_api', 'cache_clear_url')
        self.history_avg_price = pd.DataFrame(
            {'product_sku_key': [], 'product_level_key': [], 'avg_price': []})

    def load_model_data(self):
        """
        Load the modeling dataset.

        Populates ``self.model_data`` (settlement records) and
        ``self.product_ohe`` (product encoder) via preprocess_settle_data
        for ``self.model_date``.
        :return: None
        """
        self.model_data, self.product_ohe = preprocess_settle_data(model_date=self.model_date)
        logger.info('model_data shape@{}'.format(self.model_data.shape))

    # 数据乱序
    def shuffle(self):
        X = self.model_data
        ind = np.arange(X.shape[0])
        for i in range(7):
            np.random.shuffle(ind)
        return X.loc[ind]

    def eliminate_anomaly(self):
        """
        Drop known-anomalous rows from ``self.model_data``.

        Two sources are consulted: file-based scalping lists (document item
        ids and product numbers) and anomaly records kept in the database.
        :return: None
        """
        logger.info('eliminating anomaly...')
        # file-based anomaly lists
        bad_document_ids = af_loader.load_scalping_document_id()
        bad_product_nos = af_loader.load_scalping_product_no()
        keep = ~self.model_data['document_item_id'].isin(bad_document_ids)
        self.model_data = self.model_data[keep]
        keep = ~self.model_data['product_no'].isin(bad_product_nos)
        self.model_data = self.model_data[keep]
        # database-backed anomaly records
        anomaly_items = load_anomaly_document_item()
        if not anomaly_items.empty:
            logger.info('eliminating anomaly products size@{}'.format(
                anomaly_items.shape))
            keep = ~self.model_data['document_item_id'].isin(
                anomaly_items['document_item_id'])
            self.model_data = self.model_data[keep]

    def weight_function(self, s, relative_date=None, max_period=90):
        """
        计算周期，30天为一周期
        :param s: 日期序列
        :param relative_date: 当前日期，默认为当天日期
        :param max_period: 最大周期数
        :param interval: 周期间隔
        :return:
        """
        if relative_date is None:
            relative_date = get_today()
        diff_days = relative_date - s
        diff_days = diff_days.days
        period = max_period - diff_days
        return min(max(period, 1), 90) / 3.0

    def extract_training_data(self):
        """
        Prepare the held-out evaluation inputs from the most recent data.

        Shuffles ``self.model_data``, attaches a recency ``weight`` column,
        fits the one-hot encoder on the last RECENT_DAYS of rows, and stores
        encoded features / targets / weights on ``self`` for train_model.
        :return: None
        """
        logger.info("extract_training_data is begin !")
        if self.model_data is None:
            logger.critical('model_data is None!')
            return

        # shuffle rows before slicing the recent window
        self.model_data = self.shuffle()
        self.model_data['weight'] = self.model_data['settle_list_create_date'].apply(self.weight_function)
        logger.info('extract_training_data done!')

        # recent window used as the evaluation set
        recent_date = self.model_date - \
                      datetime.timedelta(days=laptop_model_config.RECENT_DAYS)

        x_test = self.model_data[self.model_data['settle_list_create_date'] >= recent_date]
        y_test = x_test['item_quotation_price_num']

        # NOTE(review): the encoder is fitted on MODEL_FEATURES here while
        # train_model() transforms with MODEL_FEATURES_NEW — confirm the two
        # feature lists are identical, otherwise transform will mismatch.
        test_x_inputs = self.ohe.fit_transform(
            x_test[laptop_model_config.MODEL_FEATURES])

        self.test_x_inputs = test_x_inputs
        self.test_y_scaled = y_test
        self.weight_test_y_input = x_test['weight']

    def train_model(self, params):
        """
        Build and fit one model for a single hyperopt trial.

        Splits ``self.model_data`` 80/20, trains with recency sample
        weights, evaluates on the recent test set prepared by
        extract_training_data, and logs RMSE / MAPE-style diagnostics.

        :param params: hyperopt sample with layer widths ``units1..units4``
                       (``units4`` doubles as batch size), regularization
                       strengths ``lr1..lr5`` and ``activation``
        :return: hyperopt result dict — sqrt of the second evaluation metric
                 is reported as both ``accuracy`` and ``loss``
        """

        # os.system("echo 1 > /proc/sys/vm/drop_caches")

        logger.info('training model...')
        self.build_model(self.test_x_inputs.shape[1], params)

        tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath,  # log directory
                                            histogram_freq=0,  # epoch frequency for histograms; 0 disables them
                                            #                  batch_size=32,     # amount of data used per histogram
                                            write_graph=True,  # store the network structure graph
                                            write_grads=True,  # visualize gradient histograms
                                            write_images=True,  # visualize weights as images
                                            embeddings_freq=0,
                                            embeddings_layer_names=None,
                                            embeddings_metadata=None)

        # TensorBoard setup: persist weights as images and (if enabled)
        # recompute activation histograms once per epoch

        x_train, x_valid, y_train, y_valid = train_test_split(self.model_data,
                                                              self.model_data['item_quotation_price_num'],
                                                              test_size=0.2)
        train_x_inputs = self.ohe.transform(
            x_train[laptop_model_config.MODEL_FEATURES_NEW])

        valid_x_inputs = self.ohe.transform(
            x_valid[laptop_model_config.MODEL_FEATURES_NEW])

        # train_y_scaled = self.scaler.transform(
        #     y_train.values.reshape((-1, 1))).flatten()
        train_y_scaled = y_train

        # valid_y_scaled = self.scaler.transform(
        #     y_valid.values.reshape((-1, 1))).flatten()
        valid_y_scaled = y_valid
        weight_train_y_input = x_train['weight']
        weight_valid_y_input = x_valid['weight']

        # callbacks: TensorBoard logging plus early stopping
        cbks = [tb_cb, laptop_model_config.early_stop]
        self.train_history = self.model.fit(train_x_inputs, train_y_scaled,
                                            validation_data=(
                                                valid_x_inputs, valid_y_scaled, weight_valid_y_input),
                                            epochs=20, verbose=1, batch_size=params['units4'],
                                            callbacks=cbks, sample_weight=np.array(weight_train_y_input))

        score = self.model.evaluate(self.test_x_inputs, self.test_y_scaled, verbose=0,
                                    sample_weight=np.array(self.weight_test_y_input))

        # RMSE and percentage error on the recent test set; -1 marks
        # "could not be computed"
        test_mape = -1
        test_mape1 = -1
        try:
            recent_predict_data = self.model.predict(self.test_x_inputs)
            recent_predict_data = [x[0] for x in recent_predict_data]
            # clip both sides at 5 to avoid division blow-ups on tiny prices
            margin = np.abs(
                (np.clip(recent_predict_data, 5, None) - np.clip(self.test_y_scaled, 5, None)))
            test_mape = np.sqrt(np.mean(np.square(margin)))

            test_mape1 = np.mean(100. * np.abs(
                (np.clip(recent_predict_data, 5, None) - np.clip(self.test_y_scaled, 5, None))
                /
                np.clip(self.test_y_scaled, 5, None)
            )
                                )


        except Exception as e:
            logger.info('test_mape is exception!')

        import sys
        sys.stdout.flush()
        # global trial counter shared across hyperopt evaluations
        global iters
        iters += 1
        logger.info(str(self.train_history.history))
        logger.info('iters= {},score={},train_loss={},val_loss={},rmse={},test_mape1={},params={},'.format(iters, str(score),
                                                                                                  self.train_history.history[
                                                                                                      'custom_mean_absolute_percentage'][
                                                                                                      -1],
                                                                                                  self.train_history.history[
                                                                                                      'val_custom_mean_absolute_percentage'][
                                                                                                      -1],
                                                                                                  str(test_mape),str(test_mape1),
                                                                                                  str(params)))
        return {'accuracy': np.sqrt(score[1]), 'status': STATUS_OK, 'loss': np.sqrt(score[1])}

    def hyper_train_model(self):
        """
        Run a TPE hyper-parameter search over the network architecture.

        Each trial calls :meth:`train_model` with one sampled configuration;
        200 evaluations are performed and the best setting is logged.
        :return: None
        """
        space = {
            # layer widths; the original dict defined 'units1' twice, so the
            # first entry ([1024, 1280, 2048]) was dead — it has been removed.
            # hp.choice labels must be unique across the space ('units34'
            # is kept as-is for continuity with previously recorded trials).
            'units1': hp.choice('units11', [512, 1024, 1280, 2048]),
            'units2': hp.choice('units22', [256, 512, 1024, 1280, 2048]),
            'units3': hp.choice('units33', [256, 512, 1280, 2048]),
            # units4 doubles as the training batch size in train_model
            'units4': hp.choice('units34', [256, 512, 1024]),

            # regularization strengths for the four Dense layers
            'lr1': hp.choice('lr1', np.arange(0.001, 0.4, 0.001)),
            'lr2': hp.choice('lr2', np.arange(0.001, 0.4, 0.001)),
            'lr3': hp.choice('lr3', np.arange(0.001, 0.4, 0.001)),
            'lr4': hp.choice('lr4', np.arange(0.001, 0.3, 0.001)),
            'lr5': hp.choice('lr5', np.arange(0.01, 0.5, 0.01)),

            'activation': hp.choice('activations', ['relu', tf.nn.leaky_relu])
        }
        trials = Trials()
        best = fmin(self.train_model, space, algo=tpe.suggest,
                    max_evals=200, trials=trials)
        # the original passed ``best`` as a stray positional argument
        # (logger.info('best {}', best)); format explicitly so the message
        # renders correctly with any logger implementation
        logger.info('best {}'.format(best))

    def build_model(self, input_shape, params):
        """
        Assemble and compile the feed-forward pricing network.

        Three hidden Dense layers (widths units1..units3, shared activation,
        per-layer L2 regularization lr1..lr3) feed a single linear output
        unit with combined L1+L2 regularization (lr4/lr5). Compiled with MSE
        loss, the Adam optimizer, and the custom MAPE metric.

        :param input_shape: number of input features
        :param params: hyper-parameter dict sampled by hyperopt
        :return: None (the compiled model is stored on ``self.model``)
        """
        reloadGraph()
        network = keras.Sequential()

        hidden_specs = [
            (params['units1'], params['lr1']),
            (params['units2'], params['lr2']),
            (params['units3'], params['lr3']),
        ]
        for depth, (width, l2_strength) in enumerate(hidden_specs):
            layer_kwargs = dict(activation=params['activation'],
                                kernel_regularizer=regularizers.l2(l2_strength),
                                kernel_initializer='normal')
            if depth == 0:
                # only the first layer carries the input specification
                layer_kwargs['input_shape'] = (input_shape,)
            network.add(keras.layers.Dense(width, **layer_kwargs))

        # linear head with combined L1/L2 weight penalties
        network.add(
            keras.layers.Dense(1,
                               kernel_regularizer=regularizers.l1_l2(
                                   l1=params['lr4'], l2=params['lr5']),
                               activation='linear'))

        network.compile(loss='mse',
                        optimizer=keras.optimizers.Adam(),
                        metrics=['mse', custom_mean_absolute_percentage])
        self.model = network
        logger.info('模型初始化完成！')

    # def build_model(self, input_shape):
    #     """
    #     构建模型
    #     :param input_shape: 输入特征数量
    #     :return:
    #     """
    #     inputs = keras.Input(shape=(input_shape,))
    #
    #     dense = keras.layers.Dense(128, activation='relu')(inputs)
    #     dense = keras.layers.Dense(64, activation='relu')(dense)
    #     dense = keras.layers.Dense(32, activation='relu')(dense)
    #     outputs = keras.layers.Dense(1, activation='linear')(dense)
    #
    #     self.model = keras.Model(inputs=inputs, outputs=outputs)
    #
    #     self.model.compile(optimizer=keras.optimizers.Adam(), loss='mse', metrics=[custom_mean_absolute_percentage,'mse'])

    # def train_model(self):
    #     """
    #     训练模型
    #     :return:
    #     """
    #     if self.model_data is None:
    #         logger.critical('model_data is None!')
    #         return
    #     # 数据乱序处理
    #     self.model_data = self.shuffle()
    #
    #     # 近期数据
    #     recent_date = self.model_date - \
    #                   datetime.timedelta(days=laptop_model_config.RECENT_DAYS)
    #     recent2_date = self.model_date - \
    #                   datetime.timedelta(days=2)
    #
    #     recent1_date = self.model_date - \
    #                   datetime.timedelta(days=1)
    #
    #
    #     recent_data = self.model_data[self.model_data['settle_list_create_date'] >= recent_date]
    #     self.model_data = self.model_data.reset_index(drop=True)
    #     self.model_data['weight'] = self.model_data['settle_list_create_date'].apply(self.weight_function)
    #
    #     max_weight=self.model_data['weight'].max()
    #     min_weight=self.model_data['weight'].min()
    #     # self.model_data['weight']=1
    #     recent_data = self.model_data[self.model_data['settle_list_create_date'] >= recent_date]
    #     repeatDf=repeatData(recent_data,size=3)
    #     self.model_data=self.model_data.append(repeatDf.drop(columns='index'),ignore_index=True)
    #     self.model_data = self.model_data.reset_index(drop=True)
    #
    #     recent_data = recent_data[recent_data['settle_list_create_date'] >= recent2_date]
    #     repeatDf=repeatData(recent_data,size=3)
    #     self.model_data = self.model_data.append(repeatDf.drop(columns='index'), ignore_index=True)
    #     self.model_data = self.model_data.reset_index(drop=True)
    #
    #     recent_data = recent_data[recent_data['settle_list_create_date'] >= recent1_date]
    #     repeatDf = repeatData(recent_data, size=3)
    #     self.model_data = self.model_data.append(repeatDf.drop(columns='index'), ignore_index=True)
    #     self.model_data = self.model_data.reset_index(drop=True)
    #
    #     logger.info('train mobilie datasize={},max_weight={};min_weight={}'.format(self.model_data.shape[0],max_weight,min_weight))
    #     # self.model_data=self.model_data[self.model_data['settle_list_create_date'] < recent_date]
    #     self.model_data=self.model_data
    #     self.model_data=self.model_data.loc[self.model_data.level_id>0]
    #     # self.model_data.fillna(0, inplace=True)
    #     x_train = self.model_data.drop(columns='item_quotation_price_num')
    #
    #     train_x_inputs = self.ohe.fit_transform(
    #         x_train[laptop_model_config.MODEL_FEATURES])
    #
    #     recent_x_inputs = self.ohe.transform(
    #
    #         recent_data[laptop_model_config.MODEL_FEATURES])
    #     train_y_scaled = self.model_data['item_quotation_price_num']
    #
    #     recent_y_scaled=recent_data['item_quotation_price_num']
    #
    #     logger.info('training model...')
    #
    #     self.build_model(train_x_inputs.shape[1])
    #     self.train_history = self.model.fit(train_x_inputs, train_y_scaled,
    #                                         epochs=100, verbose=1, batch_size=1024,
    #                                         callbacks=laptop_model_config.model_callbacks,
    #                                         sample_weight=np.array(x_train['weight']),use_multiprocessing=True,workers=10)
    #     self.score = self.model.evaluate(recent_x_inputs, recent_y_scaled, verbose=1,
    #                                 sample_weight=np.array(recent_data['weight']))
    #     test_mape = -1
    #     try:
    #
    #         recent_predict_data=self.model.predict(recent_x_inputs)
    #         test_mape= getMape(recent_predict_data,recent_y_scaled)
    #
    #         logger.info('history mape_model@{},score@{} '.format( test_mape,self.score))
    #
    #
    #     except Exception as e:
    #         logger.info('test_mape is exception!')
    #
    #     logger.info('model training done!')

    def process_fish_product(self):
        """
        Price the "fish" (canary) products used to monitor model price drift.

        Predicted prices for the fish set are written to
        laptop_fish_product_price_trace so day-over-day trends can be traced.
        :return: None

        NOTE(review): this method references ``self.ohe_product``,
        ``self.ohe_level``, ``self.ohe_attr``, ``self.ohe_period`` and
        ``self.scaler``, none of which are assigned anywhere in this class
        (only ``self.ohe`` / ``self.product_ohe`` exist) — it will raise
        AttributeError if called; confirm whether it is dead code from an
        earlier multi-input model design.
        """
        if self.model is None:
            logger.critical('model is None!')
            return

        fish_product = preprocess_fish_product()
        if fish_product.empty:
            logger.info('fish product is empty, skip...')
            return

        logger.info('process fish product size@{}'.format(fish_product.shape))
        fish_product['period'] = '0'
        fish_product['trace_date'] = format_date_string(get_today())
        x_fish_product = self.ohe_product.transform(
            fish_product[laptop_model_config.PRODUCT_FEATURES])
        x_fish_level = self.ohe_level.transform(
            fish_product[laptop_model_config.LEVEL_FEATURES])
        x_fish_attr = self.ohe_attr.transform(
            fish_product[laptop_model_config.ATTR_FEATURES])
        x_fish_period = self.ohe_period.transform(
            fish_product[laptop_model_config.PERIOD_FEATURES])

        predict_price = self.model.predict(
            [x_fish_product, x_fish_level, x_fish_attr, x_fish_period])
        # de-normalize predictions back to price units before persisting
        fish_product['price'] = np.round(self.scaler.inverse_transform(
            predict_price.reshape((-1, 1))).flatten())
        trace_sql = """
        REPLACE INTO laptop_fish_product_price_trace(fish_id, trace_date, price)
        VALUES(%s, %s, %s)
        """
        mysql_prediction_processor.execute_insert_sql(trace_sql, fish_product[
            ['fish_id', 'trace_date', 'price']].to_records(index=False).tolist())

    def evaluate_model(self):
        """
        Persist the last training run's metrics to
        laptop_price_model_evaluation.
        :return: None

        NOTE(review): ``self.scaler`` is never assigned in this class (its
        initialization is commented out in __init__), so the variance-based
        de-normalization below will raise AttributeError; also
        ``train_history.params['samples']`` only exists on older Keras
        versions — confirm this method is still in use.
        """
        if self.train_history is None:
            logger.critical('train_history is None!')
            return

        epoch = self.train_history.epoch[-1]
        # multiply by the scaler variance to recover the loss in price units
        train_loss = np.round(
            np.sqrt(self.train_history.history['loss'][-1] * self.scaler.var_[0]), 4)
        val_loss = np.round(
            np.sqrt(self.train_history.history['val_loss'][-1] * self.scaler.var_[0]), 4)
        data_size = self.train_history.params['samples']

        logger.info('saving model evaluation...')
        evaluation_sql = """
        REPLACE INTO laptop_price_model_evaluation(model_date, epoch, train_loss, val_loss, data_size)
        VALUES(%s, %s, %s, %s, %s)
        """
        evaluation_data = [(format_date_string(self.model_date), epoch, float(
            train_loss), float(val_loss), data_size)]
        mysql_prediction_processor.execute_insert_sql(
            evaluation_sql, evaluation_data)

    def save_model(self):
        """
        Persist the trained model and its encoders to MODEL_DIR.

        Saves the keras model plus the feature and product one-hot encoders
        (pickled); each write is preceded by a conflict check on the target
        file name.
        :return: None
        """
        if self.model is None:
            logger.critical('model is None!')
            return

        logger.info('saving model...')

        def _checked_path(file_name):
            # guard against conflicting files, then build the full path
            check_conflict_file(laptop_model_config.MODEL_DIR, file_name)
            return os.path.join(laptop_model_config.MODEL_DIR, file_name)

        self.model.save(_checked_path(laptop_model_config.MODEL_FILE_NAME))

        with open(_checked_path(laptop_model_config.MODEL_OHE_NAME), 'wb') as f:
            pickle.dump(self.ohe, f)

        with open(_checked_path(laptop_model_config.MODEL_PRODUCT_OHE_NAME), 'wb') as f:
            pickle.dump(self.product_ohe, f)

    def push_model(self):
        """
        Push the saved model and preprocessor files to the serving hosts.

        Copies the keras model file and every pickled preprocessor to each
        host in MODEL_PUSH_SERVERS via scp; the sentinel first entry
        'no_server' disables pushing entirely.
        :return: None
        """
        logger.info(
            'pushing models to remote server model_date@{}'.format(self.model_date))
        exist_server = laptop_model_config.MODEL_PUSH_SERVERS[0]
        if exist_server == 'no_server':
            logger.info('No server need to push, SKIP!')
            return

        for ip in laptop_model_config.MODEL_PUSH_SERVERS:
            logger.info('push server to server@{}'.format(ip))
            model_push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models/laptop/'. \
                format(laptop_model_config.MODEL_DIR,
                       laptop_model_config.MODEL_FILE_NAME, ip)
            # os.system returns the shell exit status; the original ignored
            # it, so failed transfers went unnoticed
            if os.system(model_push_cmd) != 0:
                logger.error('model push failed for server@{}'.format(ip))

            preprocessor_push_cmd = 'scp {}*.pkl root@{}:/data/thy/price_model/models/laptop/'. \
                format(laptop_model_config.MODEL_DIR, ip)
            if os.system(preprocessor_push_cmd) != 0:
                logger.error('preprocessor push failed for server@{}'.format(ip))
            time.sleep(1)

    def push_model_monitor(self):
        """
        Push the saved model and preprocessor files to the monitoring hosts.

        Same flow as push_model but targets MODEL_PUSH_SERVERS_NONTIOR; the
        sentinel first entry 'no_server' disables pushing entirely.
        :return: None
        """
        logger.info(
            'pushing models to remote server model_date@{}'.format(self.model_date))
        exist_server = laptop_model_config.MODEL_PUSH_SERVERS_NONTIOR[0]
        if exist_server == 'no_server':
            logger.info('No server need to push, SKIP!')
            return

        for ip in laptop_model_config.MODEL_PUSH_SERVERS_NONTIOR:
            logger.info('push server to server@{}'.format(ip))
            model_push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models/laptop/'. \
                format(laptop_model_config.MODEL_DIR,
                       laptop_model_config.MODEL_FILE_NAME, ip)
            # os.system returns the shell exit status; the original ignored
            # it, so failed transfers went unnoticed
            if os.system(model_push_cmd) != 0:
                logger.error('model push failed for server@{}'.format(ip))

            preprocessor_push_cmd = 'scp {}*.pkl root@{}:/data/thy/price_model/models/laptop/'. \
                format(laptop_model_config.MODEL_DIR, ip)
            if os.system(preprocessor_push_cmd) != 0:
                logger.error('preprocessor push failed for server@{}'.format(ip))
            time.sleep(1)

    def push_gc_model(self):
        """
        Push the saved model and preprocessor files to the GC model path.

        Same flow as push_model but copies into the models_gc directory on
        each host; the sentinel first entry 'no_server' disables pushing.
        :return: None
        """
        logger.info(
            'pushing models to remote server model_date@{}'.format(self.model_date))
        exist_server = laptop_model_config.MODEL_PUSH_SERVERS[0]
        if exist_server == 'no_server':
            logger.info('No server need to push, SKIP!')
            return

        for ip in laptop_model_config.MODEL_PUSH_SERVERS:
            logger.info('push server to server@{}'.format(ip))
            model_push_cmd = 'scp {}{} root@{}:/data/thy/price_model/models_gc/laptop/'. \
                format(laptop_model_config.MODEL_DIR,
                       laptop_model_config.MODEL_FILE_NAME, ip)
            # os.system returns the shell exit status; the original ignored
            # it, so failed transfers went unnoticed
            if os.system(model_push_cmd) != 0:
                logger.error('model push failed for server@{}'.format(ip))

            preprocessor_push_cmd = 'scp {}*.pkl root@{}:/data/thy/price_model/models_gc/laptop/'. \
                format(laptop_model_config.MODEL_DIR, ip)
            if os.system(preprocessor_push_cmd) != 0:
                logger.error('preprocessor push failed for server@{}'.format(ip))
            time.sleep(1)

    def push_reload_model_message(self):
        """
        Ask every serving host to reload the freshly pushed model.

        For each host: remove its health-check file (taking it out of the
        load balancer), wait, POST a reload request to every API port with
        up to 3 retries each, then restore the health-check file so the
        host rejoins service.
        :return: None
        """
        logger.info(
            'pushing reload model message model_date@{}'.format(self.model_date))
        exist_server = laptop_model_config.MODEL_PUSH_SERVERS[0]
        if exist_server == 'no_server':
            logger.info('No server need to push, SKIP!')
            return

        # category 5 identifies the laptop model; reload=1 triggers a reload
        params = {"category": 5, "reload": 1, "data": [[]]}
        base_url = 'http://{}:{}/price/predict'
        for ip in laptop_model_config.MODEL_PUSH_SERVERS:
            # drop the host from health checks before reloading
            rm_cmd = 'ssh root@{} rm -f /data/thy/price_model/api_health/health_check'.format(
                ip)
            os.system(rm_cmd)
            time.sleep(60)
            results = {'code': -1}
            for port in laptop_model_config.MODEL_SERVER_PORTS:
                request_url = base_url.format(ip, port)
                logger.info('push reload message to {}'.format(request_url))
                # up to 3 attempts per port; code == 0 means success
                for i in range(1, 4):
                    response = requests.post(request_url, json=params)
                    results = json.loads(response.content)
                    time.sleep(5)
                    if results['code'] == 0:
                        logger.info('push reload message success @time {} '.format(i))
                        break
                    else:
                        logger.info('push reload message fail {} times '.format(i))

                if results['code'] != 0:
                    # a failed reload aborts the remaining ports on this host
                    logger.critical(
                        'push reloading message error! {}:{}'.format(ip, port))
                    break
                time.sleep(5)
            time.sleep(30)
            # restore the health-check file so the host rejoins service
            touch_cmd = 'ssh root@{} touch /data/thy/price_model/api_health/health_check'.format(
                ip)
            os.system(touch_cmd)
            time.sleep(10)

    def cal_history_avg_price(self, to_redis=False):
        """
        Compute the per-SKU/level average predicted price over the trailing
        LAPTOP_HISTORY_AVG_PRICE_DAYS window and optionally cache it.

        :param to_redis: when True, also write the averages to redis keyed
                         by prefix + '<sku_key>_<level_key>'
        :return: None (result stored on ``self.history_avg_price``)
        """
        logger.info('calculate history avg price...')
        history_price_date = self.model_date - datetime.timedelta(
            days=laptop_model_config.LAPTOP_HISTORY_AVG_PRICE_DAYS)
        # NOTE(review): the query has no upper date bound; an unused
        # ``history_price_end`` (model_date - 1 day) was computed in the
        # original, suggesting an end filter may have been intended.
        history_avg_price_sql = """
        SELECT cast(product_sku_key as int) product_sku_key, cast(product_level_key as int) product_level_key, avg(predict_origin) AS avg_price
        from ods.ods_price_prediction_price_prediction 
        WHERE product_category_id = 5 AND predict_date >= '{}' and predict_origin>0 and product_brand_id  in (9,484,184,52) 
        GROUP BY product_sku_key, product_level_key
        """.format(history_price_date.strftime('%Y-%m-%d'))

        history_avg_price = presto_processor.load_sql(history_avg_price_sql)
        self.history_avg_price = history_avg_price

        if to_redis:
            logger.info('saving laptop history price into redis')
            # redis key: prefix + '<sku>_<level>'
            history_avg_price.index = laptop_model_config.LAPTOP_HISTORY_AVG_PRICE_PREFIX + \
                                      history_avg_price['product_sku_key'].astype(
                                          str) + '_' + history_avg_price['product_level_key'].astype(str)
            redis_dict = history_avg_price['avg_price'].to_dict()
            self.insertInRedisByPipline(redis_dict, 10000)
            logger.info('saving laptop history price into redis done !')

    def insertInRedisByPipline(self, data, batchSize=10000):
        """
        Bulk-insert key/value pairs into Redis through a pipeline.

        Every key is written with the configured laptop-history TTL.  A
        failing batch is logged and the remaining batches are still attempted
        (best-effort, matching the original per-batch behavior).

        :param data: dict of redis key -> value
        :param batchSize: number of SET commands buffered before each flush
        :return: None
        """
        i = 0
        pipe = redis_conn.pipeline()
        for k, v in data.items():
            i += 1
            pipe.set(k, v, ex=laptop_model_config.LAPTOP_HISTORY_CACHE_TIME)
            if i % batchSize == 0:
                try:
                    pipe.execute()
                except Exception:
                    # Log the actual exception instead of dropping it; the
                    # original caught `e` but never recorded any detail.
                    logger.exception('pipline save into redis size i={} has error'.format(i))
                logger.info('pipline save into redis size i={}'.format(i))
        # Flush the final partial batch, guarded like the full batches were.
        try:
            pipe.execute()
        except Exception:
            logger.exception('pipline save into redis final batch i={} has error'.format(i))

    def __load_attr_default_price(self):
        """
        Load the default price for laptops that have no attribute configuration.
        :return: None (result stored on self.attr_default_price)
        """
        logger.info('loading default price...')
        query = "SELECT product_level_name, actual_price as default_price FROM laptop_attr_default_price"
        self.attr_default_price = mysql_prediction_processor.load_sql(query)

    def load_models(self):
        """
        Load the trained Keras price model and its pickled preprocessors.

        Populates self.model, self.ohe, self.product_ohe and the attribute
        default-price table.
        :return: None
        """
        logger.info('loading models...')
        model_file = os.path.join(laptop_predict_config.MODEL_DIR, laptop_model_config.MODEL_FILE_NAME)
        # NOTE(review): `graph`, `sess`, `set_session` and `keras` are not
        # imported in this chunk — presumably defined at module level near the
        # TF imports; confirm before reusing this method elsewhere.
        with graph.as_default():
            set_session(sess)
            # self.model = keras.models.load_model(model_file)
            # Custom loss and activation must be registered for deserialization.
            self.model = keras.models.load_model(model_file, custom_objects={
                'custom_mean_absolute_percentage': custom_mean_absolute_percentage,
                'leaky_relu': tf.nn.leaky_relu})

        # One-hot encoder applied to the model feature columns at predict time.
        ohe_file = os.path.join(laptop_predict_config.MODEL_DIR, laptop_model_config.MODEL_OHE_NAME)
        with open(ohe_file, 'rb') as f:
            self.ohe = pickle.load(f)

        # scaler_file = os.path.join(laptop_predict_config.MODEL_DIR, laptop_model_config.MODEL_SCALER_NAME)
        # with open(scaler_file, 'rb') as f:
        #     self.scaler = pickle.load(f)

        # Encoder over product names; used later to flag unseen (new) products.
        product_ohe_file = os.path.join(laptop_predict_config.MODEL_DIR, laptop_model_config.MODEL_PRODUCT_OHE_NAME)
        with open(product_ohe_file, 'rb') as f:
            self.product_ohe = pickle.load(f)

        self.__load_attr_default_price()

        logger.info('loading models done')

    def process_complement_data(self):
        """
        Load raw per-sku property rows, pivot them to one row per sku, and
        prepare the lookup tables needed for batched prediction.
        :return: None
        """
        logger.info('processing complement data...')
        raw = self.__load_complement_data()

        # Keep a single value per (sku, property) and trim whitespace.
        raw = raw.drop_duplicates(['product_sku_id', 'property'])
        raw['property'] = raw['property'].map(lambda s: s.strip())
        raw['property_value'] = raw['property_value'].map(lambda s: s.strip())

        # Pivot: properties become columns; the identity aggfunc keeps the
        # single deduplicated value per cell.
        pivoted = raw.pivot_table(index='product_sku_id', columns='property', values='property_value',
                                  aggfunc=lambda x: x).reset_index()
        pivoted.fillna('unknown', inplace=True)
        self.pivot_data = pivoted
        self.pivot_data['CPU'] = self.pivot_data['CPU'].astype(str)
        self.pivot_data['GPU'] = self.pivot_data['GPU'].astype(str)

        self.levels = self.__load_product_levels()
        self.brands = self.__load_product_brand()
        mapping = self.__load_template_level_mapping()
        self.template_level_mapping = mapping.rename(columns={'level_id': 'product_level_id'})
        # Constant join key so a later merge yields the cartesian product of
        # skus x levels.
        self.pivot_data['_tmp_key'] = 0
        self.levels['_tmp_key'] = 0

    def process_complement_merge_data(self, begin, end):
        """
        Build the prediction frame for one batch of pivoted sku rows.

        Cross-joins rows [begin, end) of ``self.pivot_data`` with all levels,
        attaches brand ids, runs feature preprocessing, fills any model
        features missing from the batch, and joins the level-template mapping.
        Results are stored on ``self.complement_data`` / ``self.predict_data``.

        :param begin: start row index (inclusive) into self.pivot_data
        :param end: end row index (exclusive) into self.pivot_data
        :return: None
        """
        # Cartesian product of this sku batch with every level, via the
        # constant _tmp_key added in process_complement_data.
        data = pd.merge(self.pivot_data.iloc[begin:end, :], self.levels, on='_tmp_key', how='outer').drop(
            columns='_tmp_key')
        data = pd.merge(data, self.brands, on='product_brand_name', how='left')
        # Assign the filled column back explicitly: the original chained
        # `data.product_brand_id.fillna(..., inplace=True)` is deprecated and
        # a silent no-op under pandas copy-on-write.
        data['product_brand_id'] = data['product_brand_id'].fillna(-1)
        data = preprocess_model_data(data, is_cal_period=False)

        data['period'] = '0'
        data['product_category_id'] = 5
        data['product_category_name'] = '笔记本'

        self.complement_data = data
        # Fill features the model expects but this batch lacks, so the
        # one-hot encoder always sees a complete column set.
        for feature in MODEL_FEATURES:
            if feature not in self.complement_data:
                self.complement_data[feature] = 'unknown'
                logger.info("添加遗漏特征：{}".format(feature))

        self.complement_data = self.complement_data.rename(columns={'level_id': 'product_level_id'})

        # Inner join keeps only (template, level) pairs that actually exist.
        self.predict_data = pd.merge(self.complement_data, self.template_level_mapping,
                                     on=['product_level_template_id', 'product_level_id'])

        self.predict_data['level_id'] = self.predict_data['product_level_id']
        self.predict_data['date'] = self.model_date.strftime('%Y-%m-%d')

    def load_predict_save_data(self):
        """
        Predict and persist the complement data in fixed-size batches.

        Iterates over ``self.pivot_data`` in batches of PREDICT_BATCH_SIZE,
        building, predicting and saving each batch in turn.
        :return: None
        """
        self.pivot_data_size = self.pivot_data.shape[0]

        self.batch_size = laptop_model_config.PREDICT_BATCH_SIZE
        logger.info('totalCnt:{};batchSize:{}'.format(self.pivot_data_size, self.batch_size))
        gen = self.genertor_predict_batch(self.pivot_data_size, self.batch_size)
        batch_no = 0  # renamed from `iter`, which shadowed the builtin
        while True:
            begin = next(gen)
            end = begin + self.batch_size
            # Use >= (not >) so an exactly-divisible total does not process a
            # final empty batch: the outer cartesian merge in
            # process_complement_merge_data would otherwise emit level-only
            # rows with no sku data.
            if begin >= self.pivot_data_size:
                break
            batch_no += 1
            logger.info('laptop iter={};begin={}~~end={} load_predict_save...'.format(batch_no, begin, end))
            self.process_complement_merge_data(begin, end)
            self.predict_complement_data()
            self.save_complement_data()

    def predict_complement_data(self):
        """
        Run the price model over ``self.predict_data`` and post-process prices.

        Steps: one-hot encode features, predict, blend with the historical
        average price, flag new products, substitute default prices for skus
        with no attribute info, discount drive-less machines, and clamp to a
        minimum price of 5.
        :return: None
        """
        if self.predict_data is None:
            logger.critical('predict_data is None!')
            return

        logger.info('predicting complement data...')

        self.predict_data['period'] = '0'
        test_x_inputs = self.ohe.transform(self.predict_data[laptop_model_config.MODEL_FEATURES])
        predict_scaled = self.model.predict(test_x_inputs)
        logger.info('predicting complement data...done！')

        # flatten() without arguments: the original passed flatten(-1), but
        # the order argument must be a string ('C'/'F'/'A'/'K') — non-string
        # orders were deprecated and now raise in modern numpy.
        self.predict_data['forecast_reference_price'] = np.round(
            predict_scaled.flatten()).astype(int)

        self.predict_data['predict_origin'] = self.predict_data['forecast_reference_price']

        # Rename to the join-key names; idempotent across batches because
        # renaming already-renamed columns is a no-op.
        self.history_avg_price = self.history_avg_price.rename(
            columns={'product_sku_key': 'product_sku_id', 'product_level_key': 'product_level_id',
                     'avg_price': 'history_avg_price'})
        self.predict_data = pd.merge(self.predict_data, self.history_avg_price,
                                     on=['product_sku_id', 'product_level_id'], how='left')
        # Fall back to the fresh model prediction when no history exists.
        self.predict_data['history_avg_price'] = self.predict_data['history_avg_price'].where(
            self.predict_data['history_avg_price'].notnull(), self.predict_data['forecast_reference_price'])
        # Weighted blend of model output and historical average.
        self.predict_data['forecast_reference_price'] = self.predict_data[
                                                            'forecast_reference_price'] * laptop_predict_config.MODEL_PRICE_WEIGHT + \
                                                        self.predict_data[
                                                            'history_avg_price'] * laptop_predict_config.HISTORY_AVG_PRICE_WEIGHT

        # A product name unseen by the encoder one-hot-encodes to all zeros,
        # so a row sum of 0 marks a new product (then mapped 0->1, 1->0).
        self.predict_data['is_new_product'] = self.product_ohe.transform(
            self.predict_data[['product_name']]).sum(axis=1).astype(int)
        self.predict_data['is_new_product'] = self.predict_data['is_new_product'].replace({0: 1, 1: 0})

        # Use the per-level default price for skus with no attribute info.
        if self.attr_default_price is not None:
            is_no_attr = (self.predict_data[laptop_model_config.NO_ATTR_CHECK_LIST].isin(
                ['unknown', 'APPLE_HDD', 'MS_HDD'])).all(axis=1)
            self.predict_data = pd.merge(self.predict_data, self.attr_default_price, on='product_level_name',
                                         how='left')
            # Assign back instead of chained-inplace fillna (deprecated;
            # silent no-op under pandas copy-on-write).
            self.predict_data['default_price'] = self.predict_data['default_price'].fillna(5)
            self.predict_data.loc[is_no_attr, 'forecast_reference_price'] = self.predict_data.loc[
                is_no_attr, 'default_price']
            self.predict_data.loc[is_no_attr, 'is_new_product'] = 0

        # Machines with all other attributes known but neither HDD nor SSD
        # are discounted to 20% of the blended price.
        all_have_but_hd_attr = ((~self.predict_data[laptop_model_config.ALL_HAVE_CHECK_LIST].isin(
            ['unknown'])).all(axis=1)) & ((self.predict_data[laptop_model_config.HD_CHECK_LIST].isin(
            ['不含固态硬盘', '不含机械硬盘', 'APPLE_HDD', 'MS_HDD'])).all(axis=1))
        self.predict_data.loc[all_have_but_hd_attr, 'forecast_reference_price'] = \
            self.predict_data.loc[all_have_but_hd_attr, 'forecast_reference_price'] * 0.2
        self.predict_data['is_no_hd'] = 0
        self.predict_data.loc[all_have_but_hd_attr, 'is_no_hd'] = 1

        # Clamp to a floor price of 5.
        self.predict_data['forecast_reference_price'] = self.predict_data['forecast_reference_price'].where(
            self.predict_data['forecast_reference_price'] >= 5, 5)

    def save_complement_data(self):
        """
        Insert the current batch of predictions into MySQL.

        Rows are stamped with tomorrow's date and written to
        ``price_prediction_train_qianyi_test``.
        :return: None
        """
        if self.predict_data is not None:
            logger.info('saving complement data...')

            # Tomorrow's date — equivalent to the original
            # strptime(time.strftime(...)) round-trip, without the string detour.
            dt = datetime.date.today() + datetime.timedelta(days=1)
            self.predict_data['date'] = dt.strftime('%Y-%m-%d')
            insert_sql = """
            INSERT INTO price_prediction_train_qianyi_test(date, product_sku_key, product_sku_name, product_level_key, 
            product_level_name, product_key, product_name, product_category_id, product_category_name, product_brand_id,
            product_brand_name, predict_origin, forecast_reference_price,is_new_product,is_no_hd)
            VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            """
            # Column order must match the INSERT column list above.
            mysql_price_model.execute_insert_sql(insert_sql,
                                                 self.predict_data[
                                                     ['date', 'product_sku_id', 'product_sku_name', 'product_level_id',
                                                      'product_level_name', 'product_id', 'product_name',
                                                      'product_category_id', 'product_category_name',
                                                      'product_brand_id',
                                                      'product_brand_name', 'predict_origin',
                                                      'forecast_reference_price',
                                                      'is_new_product', 'is_no_hd']
                                                 ].to_records(index=False).tolist())
            logger.info('saving predict data to mysql done size={}'.format(self.predict_data.shape[0]))
            self.mysql_price_num += len(self.predict_data)

    def save_yesterday_data_to_today(self):
        """
        Copy yesterday's laptop predictions forward as today's training rows.

        Inserts rows from `price_prediction` (category 5) into
        price_prediction_train stamped with today's date, then records the
        resulting row count on self.mysql_price_num.
        :return: None
        """
        migrate_sql = """
        INSERT INTO price_prediction_train(date, product_sku_key, product_sku_name, product_level_key,
                    product_level_name, product_key, product_name, product_category_id, product_category_name, product_brand_id,
                    product_brand_name, forecast_reference_price,is_new_product,is_no_hd,predict_origin,POLY_pred_price)
                    select DATE_FORMAT(NOW(),'%Y-%m-%d') as date, product_sku_key, product_sku_name, product_level_key,
                    product_level_name, product_key, product_name, product_category_id, product_category_name, product_brand_id,
                    product_brand_name,POLY_pred_price as forecast_reference_price,is_new_product,is_no_hd,predict_origin,POLY_pred_price

                    from `price_prediction` where `product_category_id` in (5) 
        """
        mysql_price_model.execute_sql(migrate_sql)

        count_sql = """
        select count(*) from price_prediction_train  where product_category_id in (5) 
        """
        count_df = mysql_price_model.load_sql(count_sql)
        self.mysql_price_num = count_df.iloc[0, 0]
        logger.info('执行copy任务完成！')

    def genertor_predict_batch(self, totalCnt, batch_size):
        """
        Yield batch start offsets: 0, batch_size, 2*batch_size, ...

        Offsets are produced past totalCnt (up to but excluding
        totalCnt + batch_size) so the caller can use an out-of-range offset
        as its stop condition.

        :param totalCnt: total number of rows to cover
        :param batch_size: rows per batch
        :return: generator of int offsets
        """
        yield from range(0, totalCnt + batch_size, batch_size)

    def __load_complement_data(self):
        """
        Load the raw sku property rows used to build the complement data.
        :return: DataFrame returned by COMPLETE_DATA_SQL
        """
        frame = presto_processor.load_sql(COMPLETE_DATA_SQL)
        return frame

    def __load_product_levels(self):
        """
        Load the product level table.
        :return: DataFrame returned by LEVEL_SQL
        """
        frame = presto_processor.load_sql(LEVEL_SQL)
        return frame

    def __load_product_brand(self):
        """
        Load the brand id/name table.

        (The original docstring said "load levels" — a copy-paste error; this
        runs BRAND_ID_NAME_SQL.)
        :return:
        """
        return presto_processor.load_sql(BRAND_ID_NAME_SQL)

    def __load_template_level_mapping(self):
        """
        Load the mapping between level templates and levels.
        :return: DataFrame with product_level_template_id cast to str and
            level_id cast to int
        """
        mapping = presto_processor.load_sql(LEVEL_TEMPLATE_MAPPING_SQL)
        # Normalize join-key dtypes for the later merges.
        mapping = mapping.assign(
            product_level_template_id=mapping['product_level_template_id'].astype(str),
            level_id=mapping['level_id'].astype(int),
        )
        return mapping

    def launch_model_anomaly_save_yesterday_data(self):
        """
        Pipeline: load model data, clean historical anomalies, detect today's
        anomalies, then copy yesterday's predictions forward to today.
        :return:
        """
        self.load_model_data()
        self.eliminate_anomaly()
        self.detect_anomaly()
        # After today's anomalies have been detected, eliminate again so the
        # newly-flagged rows are also excluded.
        self.eliminate_anomaly()
        self.save_yesterday_data_to_today()

    def launch_model(self):
        """
        Pipeline entry point: load data, extract the training set, and run
        hyper-parameter search + training.  The commented-out steps below are
        the full production pipeline, currently disabled.
        :return:
        """
        self.load_model_data()
        # self.eliminate_anomaly()
        # self.detect_anomaly()
        # After today's anomaly handling, eliminate again to drop today's anomalies.
        # self.eliminate_anomaly()
        # self.train_model()
        self.extract_training_data()
        self.hyper_train_model()
        # self.evaluate_model()
        # self.process_fish_product()
        # self.save_model()
        # self.cal_history_avg_price()
        # self.__load_attr_default_price()
        # self.load_models()
        # self.process_complement_data()
        # self.load_predict_save_data()


if __name__ == '__main__':
    # Script entry point: build the model wrapper and run the training flow.
    # NOTE(review): LaptopModel is defined earlier in this file (outside this
    # chunk).
    model = LaptopModel()

    model.launch_model()
