import datetime
import json
import os
import pickle
import time
import math
import numpy as np
import pandas as pd
import requests
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler, OrdinalEncoder
from tensorflow.python.keras import regularizers

from src.laptop.anomaly import anomaly_file_loader as af_loader
from src.laptop.anomaly.anomaly_detect import LaptopAnomalyDetector
from src.laptop.anomaly.anomaly_load import load_anomaly_document_item
from src.laptop.model import laptop_model_config
from src.laptop.preprocessor.fish_product_data_loader import preprocess_fish_product
from src.laptop.preprocessor.settle_data_loader import preprocess_settle_data
from src.laptop.preprocessor.model_data_preprocessor import preprocess_model_data, random_ratio
from src.utils.config import logger, config
from src.utils.db_processor import mysql_prediction_processor, mysql_price_model, presto_processor
from src.utils.feishu_message import feishu_messager
from src.utils.util import check_conflict_file, get_today, format_date_string
from src.laptop.model.laptop_model_config import ALL_FEATURES
from src.utils.model_utils import getMape, custom_mean_absolute_percentage
from src.laptop.job.sku_price_complement import COMPLETE_DATA_SQL, LEVEL_SQL, \
    LEVEL_TEMPLATE_MAPPING_SQL, BRAND_ID_NAME_SQL, query_sku_sql
from src.utils.redis_pool import redis_conn
from src.laptop.predict import laptop_predict_config
from sklearn.utils import shuffle

import tensorflow as tf
from tensorflow import keras
from tensorflow.python.keras.backend import set_session

from keras.layers import Input, Dense, Dropout, concatenate
from keras.models import Model
from keras import optimizers

# TensorFlow 1.x-style global session/graph, shared by load_models() and all
# later predict calls so the model is usable across threads.
session_config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=session_config)
# NOTE(review): `global` at module level is a no-op; `graph` is a plain
# module-level name either way.
global graph
graph = tf.get_default_graph()


def repeatData(df, size=3):
    """
    Oversample *df* by randomly re-drawing rows until (size - 1) times the
    original row count is reached, jittering the price of the copies.

    :param df: source DataFrame containing ``item_quotation_price_num``
    :param size: target multiple of the original row count
    :return: DataFrame of the resampled rows (or the empty frame unchanged)
    """
    base = df.reset_index()
    n_rows = base.shape[0]
    logger.info('dfsize={}'.format(n_rows))
    if n_rows == 0:
        return base
    # draw (size - 1) * n random row positions, then shuffle them repeatedly
    picks = np.random.randint(0, high=n_rows, size=n_rows * (size - 1))
    for _ in range(7):
        np.random.shuffle(picks)
    sampled = base.loc[picks]
    # jitter the duplicated prices so the copies are not exact repeats
    jitter = random_ratio(n_rows * (size - 1))
    sampled.item_quotation_price_num = sampled.item_quotation_price_num * jitter
    return sampled


class LaptopModel:
    """
    笔记本价格模型构建过程
    1. 读取结算数据
    2. 排除异常数据
    3. 检测当日出货物品异常数据
    4. 训练模型
    5. 检测价格趋势异常数据
    6. 处理钓鱼物品
    7. 评估模型
    8. 保存模型
    9. 推送模型
    10. 调用清除缓存接口
    """

    def __init__(self, model_date=None):
        """
        Initialize model state, encoders and configuration.

        :param model_date: modelling date; defaults to today when omitted
        """
        if model_date is None:
            self.model_date = get_today()
        else:
            self.model_date = model_date

        self.model_data = None
        self.model = None
        self.train_history = None
        self.mysql_price_num = 0
        # One-hot encoder for categorical features
        # self.ohe = OneHotEncoder(handle_unknown='ignore', sparse=False)
        self.ohe = OneHotEncoder(handle_unknown='ignore')
        self.standard = StandardScaler()
        self.ode = OrdinalEncoder()
        # self.ohe_level = OneHotEncoder(handle_unknown='ignore')
        # self.ohe_attr = OneHotEncoder(handle_unknown='ignore')
        # self.ohe_period = OneHotEncoder(handle_unknown='ignore')
        # Price standardization scaler
        self.scaler = StandardScaler()
        # Model files to be pushed to serving machines
        self.push_model_file_list = []
        # URL used to clear the ab-price API cache
        self.ab_price_api_url = config.get_config(
            'ab_price_api', 'cache_clear_url')
        self.history_avg_price = pd.DataFrame({'product_sku_key': [], 'product_level_key': [], 'avg_price': []})

        # Cross-feature tables built by additional_features() during training
        self.brand_fe = None
        self.product_fe = None
        logger.info('self.model_date is@{}'.format(self.model_date))
    def load_model_data(self):
        """
        Load the settlement data used for modelling and drop rows flagged
        for SSD / memory / hard-disk inconsistencies.
        :return:
        """
        self.model_data, self.product_ohe = preprocess_settle_data(model_date=self.model_date)
        logger.info('model_data1 shape@{}'.format(self.model_data.shape))
        clean_rows = ((self.model_data['ssd_flag'] == 0)
                      & (self.model_data['memory_flag'] == 0)
                      & (self.model_data['hd_flag'] == 0))
        self.model_data = self.model_data[clean_rows]
        logger.info('model_data2 shape@{}'.format(self.model_data.shape))


    def weight_function(self, s, relative_date=None, max_period=90):
        """
        计算周期，30天为一周期
        :param s: 日期序列
        :param relative_date: 当前日期，默认为当天日期
        :param max_period: 最大周期数
        :param interval: 周期间隔
        :return:
        """
        if relative_date is None:
            relative_date = get_today()
        diff_days = relative_date - s
        diff_days = diff_days.days
        period = max_period - diff_days
        return min(max(period, 1),90)/ 3.0

    # 数据乱序
    def shuffle(self, data):
        X = data
        ind = np.arange(X.shape[0])
        for i in range(7):
            np.random.shuffle(ind)
        return X.loc[ind]

    def eliminate_anomaly(self):
        """
        Drop known-anomalous rows from ``self.model_data``: file-based
        scalping blacklists first, then database-flagged document items.
        :return:
        """
        logger.info('eliminating anomaly...')
        # file-based blacklists
        bad_document_ids = af_loader.load_scalping_document_id()
        bad_product_nos = af_loader.load_scalping_product_no()
        self.model_data = self.model_data[
            ~self.model_data['document_item_id'].isin(bad_document_ids)]
        self.model_data = self.model_data[
            ~self.model_data['product_no'].isin(bad_product_nos)]
        # database-flagged anomalies
        anomaly_items = load_anomaly_document_item()
        if anomaly_items.empty:
            return
        logger.info('eliminating anomaly products size@{}'.format(
            anomaly_items.shape))
        self.model_data = self.model_data[
            ~self.model_data['document_item_id'].isin(anomaly_items['document_item_id'])]

    def detect_anomaly(self):
        """
        Run anomaly detection on the model data, comparing recent shipment
        prices against predictions from the model dated two days back.
        :return:
        """
        reference_date = self.model_date - datetime.timedelta(days=2)
        detector = LaptopAnomalyDetector(self.model_data, reference_date)
        detector.launch_anomaly_detection()


    def build_new_model_keras(self, input_shape, input_shape_b):
        """
        Build and compile the two-input dense regression network and store
        it on ``self.model``.

        :param input_shape: width of the first (categorical) input
        :param input_shape_b: width of the second (continuous) input
        """
        inp_a = Input(shape=(input_shape,))
        inp_b = Input(shape=(input_shape_b,))

        # each input gets its own wide relu layer before merging
        branch_a = Dense(3072, activation='relu')(inp_a)
        branch_b = Dense(3072, activation='relu')(inp_b)
        merged = concatenate([branch_a, branch_b], axis=-1)

        hidden = Dropout(0.1)(merged)
        hidden = Dense(2048, activation='relu')(hidden)
        hidden = Dropout(0.1)(hidden)
        hidden = Dense(512, activation='relu')(hidden)
        prediction = Dense(1, kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.17), activation='linear')(hidden)

        self.model = Model(inputs=[inp_a, inp_b], outputs=prediction)

        self.model.compile(
            loss='mse',
            optimizer=optimizers.Adam(lr=0.0002),
            metrics=['mape', 'mse'],
        )

    def additional_features(self, data):
        # 交叉特征
        Train_gb = data.groupby("product_brand_id")
        all_info = {}
        for kind, kind_data in Train_gb:
            info = {}
            kind_data = kind_data[kind_data['item_quotation_price_num'] > 0]
            info['brand_amount'] = len(kind_data)
            info['brand_price_max'] = kind_data.item_quotation_price_num.max()
            info['brand_price_median'] = kind_data.item_quotation_price_num.median()
            info['brand_price_min'] = kind_data.item_quotation_price_num.min()
            info['brand_price_sum'] = kind_data.item_quotation_price_num.sum()
            info['brand_price_std'] = kind_data.item_quotation_price_num.std()
            info['brand_price_mean'] = kind_data.item_quotation_price_num.mean()
            info['brand_price_skew'] = kind_data.item_quotation_price_num.skew()
            info['brand_price_kurt'] = kind_data.item_quotation_price_num.kurt()
            info['brand_price_mad'] = kind_data.item_quotation_price_num.mad()

            all_info[kind] = info

        brand_fe = pd.DataFrame(all_info).T.reset_index().rename(columns={"index": "product_brand_id"})
        data = data.merge(brand_fe, how='left', on='product_brand_id')

        Train_gb = data.groupby("product_id")
        all_info = {}
        for kind, kind_data in Train_gb:
            info = {}
            kind_data = kind_data[kind_data['item_quotation_price_num'] > 0]
            info['product_amount'] = len(kind_data)
            # info['product_price_max'] = kind_data.item_quotation_price_num.max()
            # info['product_price_median'] = kind_data.item_quotation_price_num.median()
            # info['product_price_min'] = kind_data.item_quotation_price_num.min()
            # info['product_price_sum'] = kind_data.item_quotation_price_num.sum()
            # info['product_price_std'] = kind_data.item_quotation_price_num.std()
            # info['product_price_mean'] = kind_data.item_quotation_price_num.mean()
            # info['product_price_skew'] = kind_data.item_quotation_price_num.skew()
            # info['product_price_kurt'] = kind_data.item_quotation_price_num.kurt()
            # info['product_price_mad'] = kind_data.item_quotation_price_num.mad()
            all_info[kind] = info

        product_fe = pd.DataFrame(all_info).T.reset_index().rename(columns={"index": "product_id"})
        data = data.merge(product_fe, how='left', on='product_id')

        return data, brand_fe, product_fe

    def get_model_available_data(self, x, y, is_train=False, is_standard=False):
        """
        Encode a feature frame into the two arrays the network consumes.

        :param x: feature DataFrame
        :param y: target values, passed through unchanged
        :param is_train: fit the encoders (True) or reuse fitted ones (False)
        :param is_standard: standard-scale the continuous features
        :return: (encoded categorical features, continuous features, y)
        """
        categorical_cols = x[laptop_model_config.CLASSIFICATION_FEATURES]
        if is_train:
            encoded = self.ohe.fit_transform(categorical_cols)
        else:
            encoded = self.ohe.transform(categorical_cols)

        continuous_cols = x[laptop_model_config.CONTINUOUS_FEATURES]
        if not is_standard:
            continuous = np.array(continuous_cols)
        elif is_train:
            continuous = self.standard.fit_transform(continuous_cols)
        else:
            continuous = self.standard.transform(continuous_cols)

        return encoded, continuous, y


    def train_model(self):
        """
        Train the laptop price model.

        Builds cross features, imputes missing values, encodes the inputs,
        fits the two-input Keras network and logs MAPE on the recent window.
        :return:
        """
        # drop the OS page cache to free memory before the heavy training run
        os.system("echo 1 > /proc/sys/vm/drop_caches")
        starttime=time.time()

        if self.model_data is None:
            logger.critical('model_data is None!')
            return

        self.model_data['sku_level'] = self.model_data['secondary_sku_id'].astype(str) + '_' + self.model_data['secondary_level_name'].astype(str)
        # is_test selects the data split: 0 = production, 1 / other = offline tests
        is_test = 0
        RECENT_DAYS = 3
        if is_test == 0:
            # production: train on everything, report on the last RECENT_DAYS
            recent_date = self.model_date - datetime.timedelta(days=RECENT_DAYS)
            recent_data = self.model_data[(self.model_data['shop_out_date'] > recent_date) & (
                    self.model_data['shop_out_date'] <= self.model_date)]  # recent window used only for MAPE reporting
            train_data = self.model_data
        elif is_test == 1:
            # offline test: train on data before T-1, evaluate on day T
            recent_date = self.model_date
            train_date = self.model_date - datetime.timedelta(days=1)

            recent_data = self.model_data[self.model_data['shop_out_date'] == recent_date]
            train_data = self.model_data[self.model_data['shop_out_date'] < train_date]

        else:
            # offline test: train on data up to T-1, evaluate on T-1 (fit check)
            recent_date = self.model_date - datetime.timedelta(days=1)
            train_date = self.model_date - datetime.timedelta(days=1)
            recent_data = self.model_data[self.model_data['shop_out_date'] == recent_date]  # gauges the model's fitting ability
            train_data = self.model_data[self.model_data['shop_out_date'] <= train_date]


        # add brand/product cross features; recent_data reuses the tables
        # computed from train_data
        train_data,self.brand_fe,self.product_fe = self.additional_features(train_data)
        recent_data = recent_data.merge(self.brand_fe, how='left', on='product_brand_id')
        recent_data = recent_data.merge(self.product_fe, how='left', on='product_id')
        train_data.fillna(train_data.median(), inplace=True)
        # fallback values for brands/products unseen in the training window
        recent_data['brand_amount'].fillna(1, inplace=True)
        recent_data['brand_price_std'].fillna(0, inplace=True)
        recent_data.fillna({'brand_price_max': 200, 'brand_price_mean': 200, 'brand_price_median': 200, 'brand_price_min': 200}, inplace=True)
        recent_data['product_amount'].fillna(1, inplace=True)

        # keep training medians so prediction-time data can be imputed identically
        self.median_dict = dict(train_data.median())
        recent_data.fillna(self.median_dict, inplace=True)

        # cast feature columns to the dtypes expected by the encoders
        train_data[laptop_model_config.CLASSIFICATION_FEATURES] = train_data[
            laptop_model_config.CLASSIFICATION_FEATURES].astype(str)
        train_data[laptop_model_config.CONTINUOUS_FEATURES] = train_data[
            laptop_model_config.CONTINUOUS_FEATURES].astype(float)
        recent_data[laptop_model_config.CLASSIFICATION_FEATURES] = recent_data[
            laptop_model_config.CLASSIFICATION_FEATURES].astype(str)
        recent_data[laptop_model_config.CONTINUOUS_FEATURES] = recent_data[
            laptop_model_config.CONTINUOUS_FEATURES].astype(float)

        # recency weighting disabled — all samples weighted equally
        # train_data['weight'] = train_data['settle_list_create_date'].apply(self.weight_function)
        train_data['weight'] = 1

        # drop non-bootable laptops (runtime string, do not translate)
        train_data = train_data[train_data['product_name'] != '不可开机笔记本']
        train_data = shuffle(train_data, random_state=1)

        split_flag = 1
        is_standard = True

        if split_flag:
            # the validation set uses only non-POP (is_pop == 0) rows
            train_data_opt = train_data[train_data['is_pop']==0]
            train_data_pop = train_data[train_data['is_pop']==1]
            x_train, x_valid, y_train, y_valid = train_test_split(
                train_data_opt.drop(columns='item_quotation_price_num'),
                train_data_opt['item_quotation_price_num'],
                test_size=0.02, random_state=1)
            # POP rows go entirely into the training set
            x_train = pd.concat([x_train, train_data_pop.drop(columns='item_quotation_price_num')])

            y_train = y_train.append(train_data_pop['item_quotation_price_num'])
            sample_weight = np.array(x_train['weight'])
            x_train, x_train_b, y_train = self.get_model_available_data(x_train, y_train, is_train=True, is_standard=is_standard)
            x_valid, x_valid_b, y_valid = self.get_model_available_data(x_valid, y_valid, is_standard=is_standard)

        else:
            x_train = train_data.drop(columns='item_quotation_price_num')
            y_train = train_data['item_quotation_price_num']

            sample_weight = np.array(x_train['weight'])

            x_train, x_train_b, y_train = self.get_model_available_data(x_train, y_train, is_train=True, is_standard=is_standard)
            x_valid, x_valid_b, y_valid = None, None, None

        x_recent_data, x_recent_data_b, y_recent_data = self.get_model_available_data(recent_data, recent_data['item_quotation_price_num'], is_standard=is_standard)

        ################################# model training #################################
        # self.build_new_model(x_train.shape[1])
        self.build_new_model_keras(x_train.shape[1],x_train_b.shape[1])
        logger.info('starting training model...')
        t = time.time()

        self.train_history = self.model.fit([x_train, x_train_b], y_train,
                                                    epochs=300, verbose=1, batch_size=512,
                                                    validation_data=([x_valid, x_valid_b], y_valid),
                                                    callbacks=laptop_model_config.model_callbacks,
                                                    sample_weight=sample_weight
                                                    )

        logger.info("model mape is done... consume time {} min".format((time.time() - t) / 60))
        logger.info('model training done!')

        # evaluate the freshly trained model on the recent window
        test_mape_new = -1
        try:
            recent_predict_data_new = self.model.predict([x_recent_data, x_recent_data_b])
            recent_data['predict_price'] = recent_predict_data_new
            test_mape_new = getMape(recent_predict_data_new, y_recent_data)
            logger.info('new_model mape @{}'.format(test_mape_new))

        except Exception as e:
            logger.info('test_mape is exception!')
        try:
            logger.info(
                'train model is done; train model use time is {} min,recent_mse={}'.format(
                    round((time.time() - starttime) / 60, 2), test_mape_new
                ))
        except Exception as e:
            # NOTE(review): this message says "终结者手机" (mobile-phone model) in a
            # laptop pipeline — looks copied from the mobile job; confirm wording.
            logger.critical('终结者手机模型发送消息失败：@{}'.format(e))


    # def detect_anomaly_trend(self):
    #     """
    #
    #     :return:
    #     """
    #     model_date = self.model_date - datetime.timedelta(days=1)
    #     trend_data = self.model_data[self.model_data['shop_out_date'] == model_date]
    #     anomaly_trend_detector = LaptopAnomalyTrendDetector(
    #         trend_data, model_date)
    #     anomaly_trend_detector.launch_anomaly_detect()

    def process_fish_product(self):
        """
        Score the "fishing" (canary) products with the current model so their
        predicted prices can be traced over time for trend monitoring.
        :return:
        """
        if self.model is None:
            logger.critical('model is None!')
            return

        fish_product = preprocess_fish_product()
        if fish_product.empty:
            logger.info('fish product is empty, skip...')
            return

        logger.info('process fish product size@{}'.format(fish_product.shape))
        fish_product['period'] = '0'
        fish_product['trace_date'] = format_date_string(get_today())
        # NOTE(review): self.ohe_product / ohe_level / ohe_attr / ohe_period are
        # never assigned anywhere visible in this file (their creation is
        # commented out in __init__), and the 4-input predict() below does not
        # match the 2-input network built by build_new_model_keras — confirm
        # this code path is still in use.
        x_fish_product = self.ohe_product.transform(
            fish_product[laptop_model_config.PRODUCT_FEATURES])
        x_fish_level = self.ohe_level.transform(
            fish_product[laptop_model_config.LEVEL_FEATURES])
        x_fish_attr = self.ohe_attr.transform(
            fish_product[laptop_model_config.ATTR_FEATURES])
        x_fish_period = self.ohe_period.transform(
            fish_product[laptop_model_config.PERIOD_FEATURES])

        predict_price = self.model.predict(
            [x_fish_product, x_fish_level, x_fish_attr, x_fish_period])
        # de-standardize predictions back into actual prices
        fish_product['price'] = np.round(self.scaler.inverse_transform(
            predict_price.reshape((-1, 1))).flatten())
        trace_sql = """
        REPLACE INTO laptop_fish_product_price_trace(fish_id, trace_date, price)
        VALUES(%s, %s, %s)
        """
        mysql_prediction_processor.execute_insert_sql(trace_sql, fish_product[
            ['fish_id', 'trace_date', 'price']].to_records(index=False).tolist())

    def evaluate_model(self):
        """
        Persist final-epoch training metrics into the MySQL evaluation table.
        :return:
        """
        if self.train_history is None:
            logger.critical('train_history is None!')
            return

        history = self.train_history.history
        epoch = self.train_history.epoch[-1]
        # loss is MSE; report its square root (RMSE-like) for the train loss
        train_loss = np.round(np.sqrt(history['loss'][-1]), 4)
        val_loss = np.round(history['custom_mean_absolute_percentage'][-1], 4)
        data_size = self.train_history.params['samples']

        logger.info('saving model evaluation...')
        evaluation_sql = """
        REPLACE INTO laptop_price_model_evaluation(model_date, epoch, train_loss, val_loss, data_size)
        VALUES(%s, %s, %s, %s, %s)
        """
        row = (format_date_string(self.model_date), epoch,
               float(train_loss), float(val_loss), data_size)
        mysql_prediction_processor.execute_insert_sql(evaluation_sql, [row])

    def save_model(self):
        """
        Persist the trained Keras model and all pickled preprocessors /
        feature tables into MODEL_DIR.
        :return:
        """
        if self.model is None:
            logger.critical('model is None!')
            return

        logger.info('saving model...')

        check_conflict_file(laptop_model_config.MODEL_DIR,
                            laptop_model_config.MODEL_FILE_NAME)
        model_file = os.path.join(
            laptop_model_config.MODEL_DIR, laptop_model_config.MODEL_FILE_NAME)
        self.model.save(model_file)

        # (file name, object) pairs pickled next to the model, in the same
        # order the six original stanzas wrote them
        artifacts = [
            (laptop_model_config.MOBILE_OHE_NAME, self.ohe),
            (laptop_model_config.MOBILE_PRODUCT_OHE_NAME, self.product_ohe),
            (laptop_model_config.LAPTOP_STANDARD, self.standard),
            (laptop_model_config.BRAND_FE, self.brand_fe),
            (laptop_model_config.PRODUCT_FE, self.product_fe),
            (laptop_model_config.median_dict, self.median_dict),
        ]
        for file_name, obj in artifacts:
            self._pickle_artifact(file_name, obj)

    def _pickle_artifact(self, file_name, obj):
        """Pickle *obj* into MODEL_DIR under *file_name* (conflict-checked)."""
        check_conflict_file(laptop_model_config.MODEL_DIR, file_name)
        with open(os.path.join(laptop_model_config.MODEL_DIR, file_name), 'wb') as f:
            pickle.dump(obj, f)


    def __save_preprocess(self, preprocessor, preprocessor_name):
        """
        Pickle a preprocessor into MODEL_DIR and record its file name for
        the push step.

        :param preprocessor: fitted preprocessor object to serialize
        :param preprocessor_name: file-name stem (suffix appended from config)
        :return:
        """
        file_name = preprocessor_name + laptop_model_config.PREPROCESSOR_SUFFIX_NAME
        check_conflict_file(laptop_model_config.MODEL_DIR, file_name)
        target_path = os.path.join(laptop_model_config.MODEL_DIR, file_name)
        with open(target_path, 'wb') as f:
            pickle.dump(preprocessor, f)
        self.push_model_file_list.append(file_name)

    def push_model(self):
        """
        Push model (*.h5) and preprocessor (*.pkl) files to the configured
        serving servers via scp.
        :return:
        """
        logger.info(
            'pushing models to remote server model_date@{}'.format(self.model_date))
        if laptop_model_config.MODEL_PUSH_SERVERS[0] == 'no_server':
            logger.info('No server need to push, SKIP!')
            return

        for ip in laptop_model_config.MODEL_PUSH_SERVERS:
            logger.info('push server to server@{}'.format(ip))
            destination = 'root@{}:/data/thy/price_model/models/laptop/'.format(ip)
            for pattern in ('*.h5', '*.pkl'):
                os.system('scp {}{} {}'.format(
                    laptop_model_config.MODEL_DIR, pattern, destination))
            time.sleep(1)

    def push_model_monitor(self):
        """
        Push model (*.h5) and preprocessor (*.pkl) files to the monitor
        servers via scp.
        :return:
        """
        logger.info(
            'pushing models to remote server model_date@{}'.format(self.model_date))
        if laptop_model_config.MODEL_PUSH_SERVERS_NONTIOR[0] == 'no_server':
            logger.info('No server need to push, SKIP!')
            return

        for ip in laptop_model_config.MODEL_PUSH_SERVERS_NONTIOR:
            logger.info('push server to server@{}'.format(ip))
            destination = 'root@{}:/data/thy/price_model/models/laptop/'.format(ip)
            for pattern in ('*.h5', '*.pkl'):
                os.system('scp {}{} {}'.format(
                    laptop_model_config.MODEL_DIR, pattern, destination))
            time.sleep(1)

    def push_gc_model(self):
        """
        Push model (*.h5) and preprocessor (*.pkl) files to the gc model
        directory on the configured servers via scp.
        :return:
        """
        logger.info(
            'pushing models to remote server model_date@{}'.format(self.model_date))
        if laptop_model_config.MODEL_PUSH_SERVERS[0] == 'no_server':
            logger.info('No server need to push, SKIP!')
            return

        for ip in laptop_model_config.MODEL_PUSH_SERVERS:
            logger.info('push server to server@{}'.format(ip))
            destination = 'root@{}:/data/thy/price_model/models_gc/laptop/'.format(ip)
            for pattern in ('*.h5', '*.pkl'):
                os.system('scp {}{} {}'.format(
                    laptop_model_config.MODEL_DIR, pattern, destination))
            time.sleep(1)

    def push_reload_model_message(self):
        """
        Ask every serving instance to reload the model.

        For each server: remove its health-check file (taking it out of
        rotation), POST a reload request to every port with up to 3 retries,
        then restore the health-check file. Aborts on any failed reload.
        :return:
        """
        logger.info(
            'pushing reload model message model_date@{}'.format(self.model_date))
        exist_server = laptop_model_config.MODEL_PUSH_SERVERS[0]
        if exist_server == 'no_server':
            logger.info('No server need to push, SKIP!')
            return

        # category 5 = laptop; reload=1 tells the API to re-read model files
        params = {"category": 5, "reload": 1, "data": [[]]}
        base_url = 'http://{}:{}/price/predict'
        for ip in laptop_model_config.MODEL_PUSH_SERVERS:
            # take the instance out of rotation before reloading
            rm_cmd = 'ssh root@{} rm -f /data/thy/price_model/api_health/health_check'.format(
                ip)
            os.system(rm_cmd)
            # wait for in-flight traffic to drain
            time.sleep(60)
            results = {'code': -1}
            for port in laptop_model_config.MODEL_SERVER_PORTS:
                request_url = base_url.format(ip, port)
                logger.info('push reload message to {}'.format(request_url))
                # up to 3 attempts per port
                for i in range(1,4):
                    response = requests.post(request_url, json=params)
                    results = json.loads(response.content)
                    time.sleep(5)
                    if results['code'] == 0:
                        logger.info('push reload message success @time {} '.format(i))
                        break
                    else:
                        logger.info('push reload message fail {} times '.format(i))

                if results['code'] != 0:
                    # abort the rollout on any failed reload
                    logger.critical(
                        'push reloading message error! {}:{}'.format(ip, port))
                    break
                time.sleep(5)
            time.sleep(30)
            # put the instance back into rotation
            touch_cmd = 'ssh root@{} touch /data/thy/price_model/api_health/health_check'.format(
                ip)
            os.system(touch_cmd)
            time.sleep(10)

    def clear_api_cache(self):
        """
        Ask the ab-price API to drop its cache; alert via Feishu when the
        call fails or errors.
        :return:
        """
        if self.ab_price_api_url is None:
            logger.info('ab_price_api_url is None, SKIP!')
            return
        try:
            response = requests.get(self.ab_price_api_url)
            if response.status_code != 200:
                logger.critical('笔记本电脑清除abprice缓存失败！')
                feishu_messager.send_message('笔记本电脑清除abprice缓存失败！')
        except Exception as e:
            logger.critical('笔记本电脑调用清除abprice缓存接口异常：{}'.format(e))
            feishu_messager.send_message(
                '笔记本电脑调用清除abprice缓存接口异常：{}'.format(e))

    def clear_models(self):
        """
        Delete model / preprocessor files older than KEEP_MODEL_DAYS from
        MODEL_DIR (judged by file creation time).
        :return:
        """
        logger.info('clearing model files {}'.format(self.model_date))
        cutoff_ts = (get_today() - datetime.timedelta(
            days=laptop_model_config.KEEP_MODEL_DAYS)).timestamp()
        for entry in os.listdir(laptop_model_config.MODEL_DIR):
            if not (entry.endswith('pkl') or entry.endswith('h5')):
                continue
            full_path = os.path.join(laptop_model_config.MODEL_DIR, entry)
            if os.path.getctime(full_path) < cutoff_ts:
                logger.info('removing file@{}'.format(entry))
                os.remove(full_path)

    def cal_history_avg_price(self,to_redis=False):
        """
        Compute each SKU/level's average predicted price over the recent
        history window and optionally cache the results in redis.

        :param to_redis: when True, write the averages into redis via pipeline
        :return:
        """
        logger.info('calculate history avg price...')
        history_price_date = self.model_date - datetime.timedelta(
            days=laptop_model_config.LAPTOP_HISTORY_AVG_PRICE_DAYS)
        # NOTE(review): history_price_end is computed but never used — the SQL
        # below bounds the window with self.model_date instead; confirm intent.
        history_price_end = self.model_date - datetime.timedelta(
            days=1)
        history_avg_price_sql = """
        SELECT cast(product_sku_key as int) product_sku_key, cast(product_level_key as int) product_level_key, avg(predict_origin) AS avg_price
        from ods.ods_price_prediction_price_prediction
        WHERE product_category_id = 5 AND cast(predict_date  as date) >=date '{0}' and cast(predict_date  as date) <= date '{1}' and predict_origin>0 
        GROUP BY product_sku_key, product_level_key
        """.format(history_price_date.strftime('%Y-%m-%d'), self.model_date.strftime('%Y-%m-%d'))

        history_avg_price = presto_processor.load_sql(history_avg_price_sql)
        # write to pickle (debugging aid)
        # history_avg_price_pkl = open('laptop_history_avg.pkl','wb')
        # pickle.dump(history_avg_price,history_avg_price_pkl)
        # history_avg_price_pkl.close()
        # read from pickle (debugging aid)
        # history_avg_price_pkl = open('laptop_history_avg.pkl', 'rb')
        # history_avg_price = pickle.load(history_avg_price_pkl)

        self.history_avg_price = history_avg_price

        if to_redis:
            logger.info('saving laptop history price into redis')
            # redis key: <prefix><sku>_<level>
            history_avg_price.index = laptop_model_config.LAPTOP_HISTORY_AVG_PRICE_PREFIX + \
                                      history_avg_price['product_sku_key'].astype(
                                          str) + '_' + history_avg_price['product_level_key'].astype(str)
            redis_dict = history_avg_price['avg_price'].to_dict()
            # for key, value in redis_dict.items():
            #     redis_conn.set(
            #         key, value, ex=laptop_model_config.LAPTOP_HISTORY_CACHE_TIME)
            self.insertInRedisByPipline(redis_dict, 10000)
            logger.info('saving laptop history price into redis done !')

    def insertInRedisByPipline(self, data, batchSize=10000):
        """
        Write *data* key/value pairs into redis in pipelined batches, each
        entry expiring after LAPTOP_HISTORY_CACHE_TIME.

        :param data: dict of redis key -> value
        :param batchSize: number of SETs buffered per pipeline execute
        :return:
        """
        pipe = redis_conn.pipeline()
        count = 0
        for key, value in data.items():
            count += 1
            pipe.set(key, value, ex=laptop_model_config.LAPTOP_HISTORY_CACHE_TIME)
            if count % batchSize == 0:
                # best-effort: log and continue on a failed batch
                try:
                    pipe.execute()
                except Exception:
                    logger.error('pipline save into redis size i={} has error'.format(count))
                logger.info('pipline save into redis size i={}'.format(count))
        # flush the final partial batch
        pipe.execute()

    def __load_attr_default_price(self):
        """
        Load default prices for laptops that have no attribute configuration.
        :return:
        """
        logger.info('loading default price...')
        query = "SELECT product_level_name, actual_price as default_price FROM laptop_attr_default_price"
        self.attr_default_price = mysql_prediction_processor.load_sql(query)


    def load_models(self):
        """
        Load the trained Keras model and every pickled preprocessor /
        feature table from MODEL_DIR into this instance.
        :return:
        """
        logger.info('loading models...')

        model_file = os.path.join(laptop_model_config.MODEL_DIR, laptop_model_config.MODEL_FILE_NAME)
        # bind the module-level TF1 graph/session so later predict calls
        # (possibly from other threads) use this loaded model
        with graph.as_default():
            set_session(sess)
            self.model = keras.models.load_model(model_file, custom_objects={
                'custom_mean_absolute_percentage': custom_mean_absolute_percentage})

        # one-hot encoder for categorical features
        ohe_file = os.path.join(laptop_model_config.MODEL_DIR, laptop_model_config.MOBILE_OHE_NAME)
        with open(ohe_file, 'rb') as f:
            self.ohe = pickle.load(f)


        product_ohe_file = os.path.join(laptop_model_config.MODEL_DIR, laptop_model_config.MOBILE_PRODUCT_OHE_NAME)
        with open(product_ohe_file, 'rb') as f:
            self.product_ohe = pickle.load(f)

        # standard scaler for continuous features
        standard_file = os.path.join(laptop_model_config.MODEL_DIR, laptop_model_config.LAPTOP_STANDARD)
        with open(standard_file, 'rb') as f:
            self.standard = pickle.load(f)

        # brand/product cross-feature tables built at training time
        brand_fe_file = os.path.join(laptop_model_config.MODEL_DIR, laptop_model_config.BRAND_FE)
        with open(brand_fe_file, 'rb') as f:
            self.brand_fe = pickle.load(f)

        product_fe_file = os.path.join(laptop_model_config.MODEL_DIR, laptop_model_config.PRODUCT_FE)
        with open(product_fe_file, 'rb') as f:
            self.product_fe = pickle.load(f)

        # training-time medians used to impute missing features at predict time
        median_dict_file = os.path.join(laptop_model_config.MODEL_DIR, laptop_model_config.median_dict)
        with open(median_dict_file, 'rb') as f:
            self.median_dict = pickle.load(f)

        logger.info('loading models done')

    def process_complement_data(self):
        """
        Process the complement data: pivot the raw (sku, property) rows into
        one row per SKU, attach brand ids, run the shared model
        preprocessing, and fill engineered price-statistics features.
        Results are stored on self.pivot_data / self.levels / self.brands /
        self.template_level_mapping.
        :return:
        """
        logger.info('processing complement data...')
        data = self.__load_complement_data()

        # keep a single value per (sku, property) pair before pivoting
        data = data.drop_duplicates(['product_sku_id', 'property'])
        data['property'] = data['property'].map(lambda x: x.strip())
        data['property_value'] = data['property_value'].map(lambda x: x.strip())

        # long -> wide: one column per property; the identity aggfunc keeps
        # the single remaining value of each cell
        data = data.pivot_table(index='product_sku_id', columns='property', values='property_value',
                                aggfunc=lambda x: x).reset_index()
        data.fillna('unknown', inplace=True)
        data['product_id'] = data['product_id'].astype(int)
        self.pivot_data = data
        self.pivot_data['CPU'] = self.pivot_data['CPU'].astype(str)
        self.pivot_data['GPU'] = self.pivot_data['GPU'].astype(str)

        self.levels = self.__load_product_levels()
        self.brands = self.__load_product_brand()
        self.template_level_mapping = self.__load_template_level_mapping()
        self.template_level_mapping = self.template_level_mapping.rename(columns={'level_id': 'product_level_id'})
        # constant join key used later for the Cartesian product (cross
        # join) of SKUs with levels in process_complement_merge_data
        self.pivot_data['_tmp_key'] = 0
        self.levels['_tmp_key'] = 0


        self.pivot_data = pd.merge(self.pivot_data, self.brands, on='product_brand_name', how='left')
        # -1 marks brands with no known brand id
        self.pivot_data.product_brand_id.fillna(-1, inplace=True)

        self.pivot_data = preprocess_model_data(self.pivot_data, is_cal_period=False)

        self.pivot_data['date'] = self.model_date.strftime('%Y-%m-%d')
        # attach brand- and product-level engineered price statistics
        self.pivot_data = self.pivot_data.merge(self.brand_fe, how='left', on='product_brand_id')
        self.pivot_data = self.pivot_data.merge(self.product_fe, how='left', on='product_id')
        # defaults for brands/products unseen during training
        self.pivot_data['brand_amount'].fillna(1, inplace=True)
        self.pivot_data['brand_price_std'].fillna(0, inplace=True)
        self.pivot_data.fillna({'brand_price_max': 200, 'brand_price_mean': 200, 'brand_price_median': 200, 'brand_price_min': 200}, inplace=True)
        self.pivot_data['product_amount'].fillna(1, inplace=True)
        # self.pivot_data['product_price_std'].fillna(0, inplace=True)
        # any remaining NaNs are filled with the training-time medians
        self.pivot_data.fillna(self.median_dict, inplace=True)


    def process_complement_merge_data(self, begin, end):
        """
        Build the prediction frame for one batch of pivoted SKU rows:
        cross-join the slice [begin, end) with every product level, attach
        the level-template mapping, and guarantee every model feature exists
        with the expected dtype. The result is stored on self.predict_data.

        :param begin: start row (inclusive) of the pivot_data slice
        :param end: end row (exclusive) of the pivot_data slice
        :return: None
        """
        batch = self.pivot_data.iloc[begin:end, :]
        # '_tmp_key' is a constant 0 column on both frames, so an outer
        # merge on it produces the Cartesian product of SKUs x levels
        frame = pd.merge(batch, self.levels, on='_tmp_key', how='outer').drop(columns='_tmp_key')

        frame['period'] = '0'
        frame['product_category_id'] = 5
        frame['product_category_name'] = '笔记本'

        frame = frame.rename(columns={'level_id': 'product_level_id'})
        # inner join keeps only (template, level) pairs that actually exist
        frame = pd.merge(frame, self.template_level_mapping,
                         on=['product_level_template_id', 'product_level_id'])
        frame['secondary_level_id'] = frame['product_level_id']

        # any feature the model expects but the data lacks gets the
        # 'unknown' placeholder category
        for feature in [f for f in ALL_FEATURES if f not in frame]:
            frame[feature] = 'unknown'

        frame[laptop_model_config.CLASSIFICATION_FEATURES] = \
            frame[laptop_model_config.CLASSIFICATION_FEATURES].astype(str)
        frame[laptop_model_config.CONTINUOUS_FEATURES] = \
            frame[laptop_model_config.CONTINUOUS_FEATURES].astype(float)

        self.predict_data = frame


    def load_predict_save_data(self):
        """
        Predict and persist the complement data batch by batch.

        Iterates over [0, pivot_data_size) in PREDICT_BATCH_SIZE slices and,
        for each slice, builds the merged frame, runs the model, and writes
        the results to mysql.
        :return: None
        """
        self.pivot_data_size = self.pivot_data.shape[0]
        self.batch_size = laptop_model_config.PREDICT_BATCH_SIZE
        logger.info('totalCnt:{};batchSize:{}'.format(self.pivot_data_size, self.batch_size))

        iteration = 0
        # Iterate the generator with a for-loop. The original
        # `while True: begin = next(gen)` raised an uncaught StopIteration
        # whenever pivot_data_size was an exact multiple of batch_size
        # (the generator's last offset equals the size, `begin > size` was
        # False, an empty batch was processed, then next() blew up).
        for begin in self.genertor_predict_batch(self.pivot_data_size, self.batch_size):
            # >= (was >) also skips the pointless empty trailing batch
            if begin >= self.pivot_data_size:
                break
            end = begin + self.batch_size
            iteration += 1
            logger.info('laptop iter={};begin={}~~end={} load_predict_save...'.format(iteration, begin, end))
            self.process_complement_merge_data(begin, end)
            self.predict_complement_data()
            self.save_complement_data()

    def predict_complement_data(self):
        """
        Predict prices for the complement data in self.predict_data.

        Runs the keras model on the encoded features, blends the model
        output with the historical average price, flags new products, and
        applies business overrides (default prices for attribute-less SKUs,
        discounts for missing hard drives / generic CPU-GPU / generic
        product names, and a floor of 5).
        :return:
        """
        if self.predict_data is None:
            logger.critical('predict_data is None!')
            return

        logger.info('predicting complement data...')
        logger.info('predicting.shape is {}'.format(str(self.predict_data.shape)))
        test_x_inputs = self.ohe.transform(self.predict_data[laptop_model_config.CLASSIFICATION_FEATURES])
        x_ava_continuous = self.standard.transform(self.predict_data[laptop_model_config.CONTINUOUS_FEATURES])
        predict_scaled = self.model.predict([test_x_inputs, x_ava_continuous])
        logger.info('predicting complement data...done！')

        # NOTE(review): flatten(-1) relies on legacy numpy accepting a
        # non-string `order`; harmless here since the output is effectively
        # 1-D, but flatten() would be the safe spelling — confirm
        self.predict_data['forecast_reference_price_mape'] = np.round(predict_scaled.flatten(-1)).astype(int)

        self.predict_data['predict_origin'] = self.predict_data['forecast_reference_price_mape'].copy()

        self.predict_data['predict_origin_mse'] = self.predict_data['forecast_reference_price_mape'].copy()

        # blend the model output with the historical average price
        # (rename is idempotent, so repeated batch calls are safe)
        self.history_avg_price = self.history_avg_price.rename(
            columns={'product_sku_key': 'product_sku_id', 'product_level_key': 'product_level_id',
                     'avg_price': 'history_avg_price'})
        self.predict_data = self.predict_data.rename(columns={'level_id': 'product_level_id'})
        self.predict_data = pd.merge(self.predict_data, self.history_avg_price,
                                     on=['product_sku_id', 'product_level_id'], how='left')

        # where no history exists, fall back to the model prediction
        self.predict_data['history_avg_price_mape'] = self.predict_data['history_avg_price'].where(
            self.predict_data['history_avg_price'].notnull(), self.predict_data['forecast_reference_price_mape'])

        # weighted combination of model price and historical average price
        self.predict_data['forecast_reference_price_mape'] = self.predict_data[
                                                                 'forecast_reference_price_mape'] * laptop_predict_config.MODEL_PRICE_WEIGHT + \
                                                             self.predict_data[
                                                                 'history_avg_price_mape'] * laptop_predict_config.HISTORY_AVG_PRICE_WEIGHT


        self.predict_data['forecast_reference_price'] = self.predict_data['forecast_reference_price_mape']
        self.predict_data['forecast_reference_price_mse'] = self.predict_data['forecast_reference_price_mape']


        # flag new products: rows whose product features produce an all-zero
        # one-hot row (sum 0) were unseen during training
        self.predict_data['is_new_product'] = self.product_ohe.transform(
            self.predict_data[laptop_model_config.PRODUCT_FEATURES]).sum(axis=1).astype(int)
        self.predict_data['is_new_product'] = self.predict_data['is_new_product'].replace({0: 1, 1: 0})

        # use the default price for products with no attribute information
        if self.attr_default_price is not None:
            is_no_attr = (self.predict_data[laptop_model_config.NO_ATTR_CHECK_LIST].isin(
                ['unknown', 'APPLE_HDD', 'MS_HDD'])).all(axis=1)
            self.predict_data = pd.merge(self.predict_data, self.attr_default_price, on='product_level_name', how='left')
            self.predict_data['default_price'].fillna(5, inplace=True)
            self.predict_data.loc[is_no_attr, 'forecast_reference_price'] = self.predict_data.loc[is_no_attr, 'default_price']
            self.predict_data.loc[is_no_attr, 'is_new_product'] = 0

        # handle SKUs that have all other attributes but neither an HDD nor
        # an SSD: discount the price to 20%
        all_have_but_hd_attr = ((~self.predict_data[laptop_model_config.ALL_HAVE_CHECK_LIST].isin(
            ['unknown'])).all(axis=1)) & ((self.predict_data[laptop_model_config.HD_CHECK_LIST].isin(
            ['不含固态硬盘', '不含机械硬盘', 'APPLE_HDD', 'MS_HDD'])).all(axis=1))
        self.predict_data.loc[all_have_but_hd_attr, 'forecast_reference_price'] = \
            self.predict_data.loc[all_have_but_hd_attr, 'forecast_reference_price'] * 0.2
        self.predict_data['is_no_hd'] = 0
        self.predict_data.loc[all_have_but_hd_attr, 'is_no_hd'] = 1

        # generic "other" CPU / GPU / product names get a 40% discount
        other_cpu = (self.predict_data['CPU'].isin(['其它处理器','其他处理器','非酷睿 i 系列']))
        other_gpu = (self.predict_data['GPU'].isin(['其它显卡','其他显卡']))
        other_product_name = (self.predict_data['product_name'].isin(['可开机笔记本']))

        self.predict_data.loc[other_cpu, 'forecast_reference_price'] = self.predict_data.loc[other_cpu, 'forecast_reference_price'] * 0.6
        self.predict_data.loc[other_gpu, 'forecast_reference_price'] = self.predict_data.loc[other_gpu, 'forecast_reference_price'] * 0.6
        self.predict_data.loc[other_product_name, 'forecast_reference_price'] = self.predict_data.loc[other_product_name, 'forecast_reference_price'] * 0.6

        # enforce the minimum price floor of 5
        self.predict_data['forecast_reference_price'] = self.predict_data['forecast_reference_price'].where(
            self.predict_data['forecast_reference_price'] >= 5, 5)

        # multiply by a coefficient (disabled)
        # self.predict_data['forecast_reference_price'] = self.predict_data['forecast_reference_price']*0.98

    def save_complement_data(self):
        """
        Save the predicted complement data to mysql
        (price_prediction_train), dated one day after the model date.
        Also accumulates the inserted row count on self.mysql_price_num.
        :return:
        """
        if self.predict_data is not None:
            logger.info('saving complement data...')
            # dt = datetime.datetime.strptime(self.model_date, '%Y-%m-%d'
            #                                 ) + datetime.timedelta(days=1)

            # predictions are stored under the day after the model date
            dt = self.model_date + datetime.timedelta(days=1)
            logger.info('data_date is {}'.format(dt))
            self.predict_data['date'] = dt.strftime('%Y-%m-%d')

            # self.predict_data.to_excel('./data/predict_data_result.xlsx', index=False, encoding='utf-8-sig')
            insert_sql = """
            INSERT INTO price_prediction_train(date, product_sku_key, product_sku_name, product_level_key, 
            product_level_name, product_key, product_name, product_category_id, product_category_name, product_brand_id,
            product_brand_name, predict_origin,predict_origin_mse, forecast_reference_price,forecast_reference_price_mse,forecast_reference_price_mape,
            is_new_product,is_no_hd)
            VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            """
            # the column order below must match the INSERT column list above
            # (note the *_id -> *_key renames between frame and table)
            mysql_price_model.execute_insert_sql(insert_sql,
                                                 self.predict_data[
                                                     ['date', 'product_sku_id', 'product_sku_name', 'product_level_id',
                                                      'product_level_name', 'product_id', 'product_name',
                                                      'product_category_id', 'product_category_name',
                                                      'product_brand_id',
                                                      'product_brand_name', 'predict_origin', 'predict_origin_mse',
                                                      'forecast_reference_price', 'forecast_reference_price_mse',
                                                      'forecast_reference_price_mape',
                                                      'is_new_product', 'is_no_hd']
                                                 ].to_records(index=False).tolist())
            logger.info('saving predict data to mysql done size={}'.format(self.predict_data.shape[0]))
            self.mysql_price_num += len(self.predict_data)

    def genertor_predict_batch(self, totalCnt, batch_size):
        """
        Yield batch start offsets: 0, batch_size, 2*batch_size, ...

        The final offset yielded is the first multiple of ``batch_size``
        greater than or equal to ``totalCnt``, so callers must check the
        upper bound themselves.

        :param totalCnt: total number of rows to cover
        :param batch_size: rows per batch
        :return: generator of int offsets
        """
        yield from range(0, totalCnt + batch_size, batch_size)

    def __load_complement_data(self):
        """
        Load the raw complement (SKU property) rows from presto.
        :return: DataFrame of COMPLETE_DATA_SQL results
        """
        return presto_processor.load_sql(COMPLETE_DATA_SQL)

    def __load_sku_list(self):
        """
        Load the SKU id list as a tuple for use in a SQL ``IN (...)`` clause.

        Always returns at least two elements (padded with -1) so the tuple
        never renders in the single-element ``(x,)`` trailing-comma form.

        :return: tuple of SKU ids, padded with -1 when needed
        """
        frame = mysql_prediction_processor.load_sql(query_sku_sql)
        if frame.empty:
            return (-1, -1)
        ids = [row[0] for row in frame.itertuples(index=False)]
        if len(ids) == 1:
            # pad the singleton so str(tuple) stays SQL-compatible
            ids.append(-1)
        return tuple(ids)

    # def __load_complement_sku_data(self):
    #     sku_list = self.__load_sku_list()
    #     res_df = presto_processor.load_sql(
    #         COMPLETE_DATA_SKU_SQL.format(sku_list, sku_list, sku_list, sku_list, sku_list, sku_list))
    #     return res_df

    def __load_product_levels(self):
        """
        Load the product levels from presto.
        :return: DataFrame of LEVEL_SQL results
        """
        return presto_processor.load_sql(LEVEL_SQL)

    def __load_product_brand(self):
        """
        Load the product brand id/name mapping from presto.
        (Original docstring said "levels" — it actually loads brands.)
        :return: DataFrame of BRAND_ID_NAME_SQL results
        """
        return presto_processor.load_sql(BRAND_ID_NAME_SQL)

    def __load_template_level_mapping(self):
        """
        Load the mapping between level templates and levels, normalising
        the join-key dtypes (template id -> str, level id -> int).

        :return: DataFrame with product_level_template_id / level_id columns
        """
        mapping = presto_processor.load_sql(LEVEL_TEMPLATE_MAPPING_SQL)
        return mapping.astype({'product_level_template_id': str, 'level_id': int})

    def launch_model_anomaly_save_yesterday_data(self):
        """
        Run the anomaly part of the model pipeline and roll yesterday's
        data forward to today.
        :return:
        """
        self.load_model_data()
        self.eliminate_anomaly()
        self.detect_anomaly()
        # after today's anomaly data has been processed, call the
        # elimination step again so today's anomalous rows are excluded too
        self.eliminate_anomaly()
        self.save_yesterday_data_to_today()

    def launch_model(self):
        """
        Full model launch: load data, remove anomalies, train and save the
        model, compute history average prices, reload the saved artifacts,
        and process the complement data.
        :return:
        """
        self.load_model_data()
        self.eliminate_anomaly()
        self.train_model()

        # self.evaluate_model()
        self.save_model()
        self.cal_history_avg_price()
        self.__load_attr_default_price()
        self.load_models()
        self.process_complement_data()
        # 2024/02/27: the line below is commented out to allow testing
        # self.load_predict_save_data()



