import os
import pickle

import numpy as np
import pandas as pd
from tensorflow import keras

from src.tablet.model import tablet_model_config
from src.tablet.predict.tablet_price_config import MAX_EVN_PRICE
from src.utils.config import logger
from src.utils.db_processor import mysql_prediction_processor, presto_processor
from src.utils.util import format_date_string


class TabletAnomalyDetector:
    """Detect anomalously priced tablet settlement records for one day.

    Pipeline: load a trained Keras price model plus its one-hot encoder and
    scaler, predict a reference price for every record dated ``history_date``,
    and flag records whose relative margin falls outside the per-price-band
    tolerance stored in ``tablet_anomaly_one_day_error_range``. Results are
    written back to MySQL via :func:`save_anomaly_data`.
    """

    def __init__(self, data, history_date):
        """
        :param data: settlement records DataFrame; must contain (at least)
            ``settle_list_create_date``, ``product_sku_key``,
            ``product_level_name``, ``item_quotation_price_num``,
            ``document_item_id`` and the model feature columns.
        :param history_date: the date (datetime-like) whose records are scored.
        """
        self.data = data
        self.history_date = history_date

        # Pipeline outputs, populated by process_anomaly() /
        # process_anomaly_reference().
        self.anomaly_data = None
        self.anomaly_reference = None

        # Model artifacts. Explicitly initialized to None so that a failed
        # __load_models() (missing model file) leaves a fully-formed instance
        # instead of undefined attributes raising AttributeError later.
        self.model = None
        self.ohe = None
        self.scaler = None

        self.__load_models()
        self.__load_anomaly_error_range()

    def __load_models(self):
        """Load the latest model and preprocessors from disk.

        Leaves ``self.model`` as ``None`` when the model file is missing,
        which makes the whole detection pipeline a no-op (see
        :func:`launch_anomaly_detection`).
        """
        logger.info('loading anomaly detect models...')

        model_file = os.path.join(tablet_model_config.MODEL_DIR, tablet_model_config.MODEL_FILE_NAME)
        if not os.path.exists(model_file):
            logger.error('model file not found!')
            return

        self.model = keras.models.load_model(model_file)

        # NOTE(review): pickle.load is only acceptable because these artifacts
        # are produced by our own training job -- never point these paths at
        # untrusted files.
        ohe_file = os.path.join(tablet_model_config.MODEL_DIR, tablet_model_config.TABLET_OHE_NAME)
        with open(ohe_file, 'rb') as f:
            self.ohe = pickle.load(f)

        scaler_file = os.path.join(tablet_model_config.MODEL_DIR, tablet_model_config.TABLET_SCALER_NAME)
        with open(scaler_file, 'rb') as f:
            self.scaler = pickle.load(f)

        logger.info('loading anomaly detect models done')

    def __load_anomaly_error_range(self):
        """Load the price-band cut points and the per-band error tolerances.

        Builds ``self.cut_points`` (open-ended bins ``[-inf, ..., +inf]``) and
        ``self.error_range`` (one row per band with ``upper_rate`` /
        ``lower_rate`` and a string ``cut_range`` label matching pd.cut output).
        """
        logger.info('loading anomaly error range...')
        sql = "SELECT cut_point, upper_rate, lower_rate FROM tablet_anomaly_one_day_error_range ORDER BY cut_point"
        error_range = mysql_prediction_processor.load_sql(sql)
        # Lower bounds are stored as positive magnitudes; negate them so they
        # can be compared directly against (possibly negative) margin rates.
        error_range['lower_rate'] = -error_range['lower_rate']
        # Drop the last stored cut point and pad with +/-inf so every price
        # falls into exactly one interval.
        self.cut_points = [-np.inf] + error_range['cut_point'].tolist()[:-1] + [np.inf]
        # Bin each cut point itself to recover the stringified interval label
        # that pd.cut will also produce for live prices in process_anomaly().
        cut_range = pd.cut(error_range['cut_point'], self.cut_points)
        cut_range = cut_range.astype(str)
        # Each tolerance row i is keyed by the interval that cut point i+1
        # lands in, i.e. the band that starts at cut point i.
        error_range = error_range.iloc[:-1, :]
        error_range['cut_range'] = cut_range[1:].tolist()
        self.error_range = error_range

    def process_history_price(self):
        """Predict prices for ``history_date`` records and log model quality.

        :return: the day's records with ``predict_price``, ``margin`` and
            ``margin_rate`` columns added; an empty DataFrame when there is
            no data for the day.
        """
        detect_data = self.data[self.data['settle_list_create_date'] == self.history_date].copy()
        if detect_data.empty:
            logger.info('detect data is empty, SKIP!')
            return detect_data
        # The model was trained with a 'period' feature; '0' means same-day.
        detect_data['period'] = '0'

        detect_x_inputs = self.ohe.transform(detect_data[tablet_model_config.TABLET_FEATURES])

        predict_price = self.model.predict(detect_x_inputs)
        # Model outputs are in scaled space; invert the scaler and round to
        # whole currency units.
        detect_data['predict_price'] = np.round(
            self.scaler.inverse_transform(predict_price.reshape((-1, 1))).flatten()).astype(int)
        # NOTE(review): this acts as a price FLOOR (values below MAX_EVN_PRICE
        # are raised to it) despite the MAX_ prefix -- confirm the constant's
        # intended semantics.
        detect_data['predict_price'] = detect_data['predict_price'].where(detect_data['predict_price'] >= MAX_EVN_PRICE,
                                                                          MAX_EVN_PRICE)
        detect_data['margin'] = detect_data['predict_price'] - detect_data['item_quotation_price_num']
        detect_data['margin_rate'] = np.round(detect_data['margin'] / detect_data['item_quotation_price_num'], 4)

        # Persist the day's model-quality metrics for monitoring.
        rmse, mae = np.sqrt(np.mean(np.square(detect_data['margin']))), np.mean(np.abs(detect_data['margin']))
        logger.info('history rmse@{:.2f} mae@{:.2f}'.format(rmse, mae))
        sql = """
        REPLACE INTO tablet_model_history_evaluation(history_date, rmse, mae, size)
        VALUES('{}', {}, {}, {})
        """.format(self.history_date.strftime('%Y-%m-%d'), rmse, mae, detect_data.shape[0])
        mysql_prediction_processor.execute_sql(sql)

        return detect_data

    def process_anomaly(self):
        """Flag records whose margin rate exceeds the band's tolerance.

        Populates ``self.anomaly_data`` (empty DataFrame when no input data).
        """
        logger.info('processing anomaly data...')
        if self.model is None:
            logger.critical('model is None!')
            return

        detect_data = self.process_history_price()
        if detect_data.empty:
            self.anomaly_data = pd.DataFrame()
            return

        # Attach each record's price band and its tolerance thresholds.
        detect_data['cut_range'] = pd.cut(detect_data['item_quotation_price_num'], self.cut_points)
        detect_data['cut_range'] = detect_data['cut_range'].astype(str)
        detect_data = pd.merge(detect_data, self.error_range, on='cut_range')
        # Anomalous = margin rate outside the (lower_rate, upper_rate) band.
        anomaly_data = detect_data[(detect_data['margin_rate'] > detect_data['upper_rate']) |
                                   (detect_data['margin_rate'] < detect_data['lower_rate'])].copy()
        logger.info('detecting anomaly data size@{}'.format(anomaly_data.shape))

        # Composite key: sku + level + price + date, used to join the mapping
        # table written in save_anomaly_data().
        anomaly_data['lookup_key'] = anomaly_data['product_sku_key'].astype(str) + \
            anomaly_data['product_level_name'] + anomaly_data['item_quotation_price_num'].astype(str) + \
            format_date_string(self.history_date)
        self.anomaly_data = anomaly_data

    def process_anomaly_reference(self):
        """Build historical price references for the flagged anomalies.

        For every anomalous sku/level, collects its last 5 daily mean
        settlement prices (strictly before ``history_date``) into
        ``self.anomaly_reference``.
        """
        if self.anomaly_data is None:
            logger.critical('anomaly data is None!')
            return
        if self.anomaly_data.empty:
            logger.info('anomaly data is empty, SKIP...')
            return

        history_data = self.data[self.data['settle_list_create_date'] < self.history_date].copy()
        # Daily mean settlement price per sku/level.
        history_data_summary = history_data.groupby(['settle_list_create_date', 'product_sku_key',
                                                     'product_level_name'], as_index=False).agg(
            {'item_quotation_price_num': 'mean'})
        history_data_summary = history_data_summary.sort_values('settle_list_create_date')
        # Keep only the 5 most recent daily prices per sku/level.
        history_data_summary = history_data_summary.groupby(['product_sku_key', 'product_level_name'],
                                                            as_index=False).apply(
            lambda df: df.tail(5)).reset_index(drop=True)
        history_data_summary.rename(columns={'settle_list_create_date': 'settle_date'}, inplace=True)
        anomaly_info = self.anomaly_data[['settle_list_create_date', 'product_sku_key', 'product_level_name']].copy()
        anomaly_info.rename(columns={'settle_list_create_date': 'stats_date'}, inplace=True)
        # Inner join restricts history to sku/levels that were flagged today.
        anomaly_reference = pd.merge(history_data_summary, anomaly_info,
                                     on=['product_sku_key', 'product_level_name'])
        self.anomaly_reference = anomaly_reference[['stats_date', 'product_sku_key', 'product_level_name',
                                                    'settle_date', 'item_quotation_price_num']].drop_duplicates()

    def save_anomaly_data(self):
        """Persist anomalies, their lookup mapping, and reference prices.

        Writes three MySQL tables: tablet_anomaly_one_day_detection,
        tablet_anomaly_one_day_mapping and tablet_anomaly_reference.
        """
        if self.anomaly_data is None:
            logger.critical('anomaly data is None!')
            return
        if self.anomaly_data.empty:
            logger.info('anomaly data is empty, SKIP...')
            return

        logger.info('saving anomaly data...')

        logger.info('loading sku name...')
        sku_id_list = self.anomaly_data['product_sku_key'].drop_duplicates().tolist()
        # NOTE(review): IN-list built by string formatting; acceptable only
        # because sku ids come from our own DB -- presumably numeric, verify.
        sku_id_cond = ','.join(str(x) for x in sku_id_list)
        sku_name_sql = """
        SELECT product_sku_id AS product_sku_key, product_sku_name 
        FROM dim.dim_product_sku 
        WHERE product_sku_id in ({})""".format(sku_id_cond)
        sku_names = presto_processor.load_sql(sku_name_sql)
        self.anomaly_data = pd.merge(self.anomaly_data, sku_names, on='product_sku_key')

        insert_detection_sql = """
        INSERT INTO tablet_anomaly_one_day_detection(sku_id, stats_date, product_sku_name, product_level_name, 
        lookup_key, predict_price, sell_price, error_rate)
        VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
        """
        self.anomaly_data['stats_date'] = self.anomaly_data['settle_list_create_date'].map(
            lambda x: x.strftime('%Y-%m-%d'))
        one_day_detection = self.anomaly_data[['product_sku_key', 'stats_date', 'product_sku_name',
                                               'product_level_name', 'lookup_key', 'predict_price',
                                               'item_quotation_price_num', 'margin_rate']]
        one_day_detection = one_day_detection.drop_duplicates()
        # NaN -> None so the MySQL driver stores SQL NULL instead of 'nan'.
        mysql_prediction_processor.execute_insert_sql(insert_detection_sql,
                                                      one_day_detection.astype(object).where(
                                                          pd.notnull(one_day_detection), None).to_records(
                                                          index=False).tolist())
        insert_lookup_mapping_sql = """
        INSERT INTO tablet_anomaly_one_day_mapping(document_item_id, lookup_key)
        VALUES(%s, %s)
        """
        mysql_prediction_processor.execute_insert_sql(insert_lookup_mapping_sql,
                                                      self.anomaly_data[['document_item_id', 'lookup_key']].to_records(
                                                          index=False).tolist())

        # Guard: process_anomaly_reference() may have been skipped or failed;
        # without it the strftime mapping below would crash on None.
        if self.anomaly_reference is None:
            logger.critical('anomaly reference is None!')
            return

        reference_sql = """
        INSERT INTO tablet_anomaly_reference(stats_date, sku_id, product_level_name, settle_date, settle_price)
        VALUES(%s, %s, %s, %s, %s)
        """
        self.anomaly_reference['stats_date'] = self.anomaly_reference['stats_date'].map(
            lambda x: x.strftime('%Y-%m-%d'))
        self.anomaly_reference['settle_date'] = self.anomaly_reference['settle_date'].map(
            lambda x: x.strftime('%Y-%m-%d'))
        mysql_prediction_processor.execute_insert_sql(reference_sql,
                                                      self.anomaly_reference[
                                                          ['stats_date', 'product_sku_key', 'product_level_name',
                                                           'settle_date', 'item_quotation_price_num']
                                                      ].to_records(index=False).tolist())

    def launch_anomaly_detection(self):
        """Run the full detection pipeline; no-op when no model was loaded.

        :return: None
        """
        if self.model is None:
            return

        self.process_anomaly()
        self.process_anomaly_reference()
        self.save_anomaly_data()
