import os
import pickle

import numpy as np
import pandas as pd
from tensorflow import keras

from src.mobile.model import mobile_model_config
from src.mobile.predict.mobile_price_config import MAX_EVN_PRICE
from src.utils.config import logger
from src.utils.db_processor import mysql_prediction_processor, presto_processor
from src.utils.util import format_date_string, check_date_str, sending_exception


def cal_price_period(s, as_of_date, cut_days=14):
    """
    Map each date in a series to its price-period index.

    Period 0 is the most recent ``cut_days``-day window before the
    reference date, period 1 the window before that, and so on.

    :param s: series of dates
    :param as_of_date: reference date (normalized via check_date_str)
    :param cut_days: number of days per period
    :return: integer period index
    """
    reference = check_date_str(as_of_date)
    elapsed = (reference - s).days - 1
    return elapsed // cut_days


def cal_previous_sell_price(df):
    """
    Derive the reference sell price for one period of grouped data.

    Takes the smaller of (a) the period's average sell price and
    (b) the lowest sell price recorded on the period's last shipping day.

    :param df: grouped rows with 'item_quotation_price_num' and
        'shop_out_date' columns
    :return: the reference sell price
    """
    mean_price = df['item_quotation_price_num'].mean()
    last_day = df['shop_out_date'].max()
    last_day_low = df.loc[
        df['shop_out_date'] == last_day, 'item_quotation_price_num'].min()
    return min(mean_price, last_day_low)


class MobileAnomalyDetector:
    """Flags quotations whose sell price deviates abnormally from the model prediction.

    Pipeline (see launch_anomaly_detection): predict prices for one day,
    select rows whose margin rate falls outside the per-price-band error
    range, attach historical reference prices, and persist the results.
    """

    def __init__(self, data, history_date):
        """
        :param data: quotation/shipment records (pandas DataFrame)
        :param history_date: date whose records the model is evaluated on
        """
        self.data = data
        self.history_date = history_date

        # populated by process_anomaly / __load_models; stay None on failure
        self.anomaly_data = None
        self.model = None
        self.anomaly_reference = None

        self.__load_models()
        self.__load_anomaly_error_range()

    def __load_models(self):
        """
        Load the latest model and its preprocessors (one-hot encoder, scaler).

        Leaves self.model as None when the model file is missing; in that
        case self.ohe / self.scaler are never set either, so callers must
        check self.model before using them.
        :return:
        """
        logger.info('loading anomaly detect models...')

        model_file = os.path.join(
            mobile_model_config.MODEL_DIR, mobile_model_config.MODEL_FILE_NAME)
        if not os.path.exists(model_file):
            logger.error('model file not found!')
            return

        self.model = keras.models.load_model(model_file)

        # NOTE(review): pickle.load on local artifacts — only safe if these
        # files come from a trusted training pipeline.
        ohe_file = os.path.join(
            mobile_model_config.MODEL_DIR, mobile_model_config.MOBILE_OHE_NAME)
        with open(ohe_file, 'rb') as f:
            self.ohe = pickle.load(f)

        scaler_file = os.path.join(
            mobile_model_config.MODEL_DIR, mobile_model_config.MOBILE_SCALER_NAME)
        with open(scaler_file, 'rb') as f:
            self.scaler = pickle.load(f)

        logger.info('loading anomaly detect models done')

    def __load_anomaly_error_range(self):
        """
        Load the anomaly cut points and the allowed error range per price band.

        Builds self.cut_points as [-inf, c1, ..., c_{n-1}, +inf] and
        self.error_range with a 'cut_range' label per band.
        :return:
        """
        logger.info('loading anomaly error range...')
        sql = "SELECT cut_point, upper_rate, lower_rate FROM mobile_anomaly_one_day_error_range ORDER BY cut_point"
        error_range = mysql_prediction_processor.load_sql(sql)
        # store the lower bound as a negative rate so it can be compared
        # directly against margin_rate
        error_range['lower_rate'] = -error_range['lower_rate']
        self.cut_points = [-np.inf] + \
            error_range['cut_point'].tolist()[:-1] + [np.inf]
        # pair each row's rates with the interval just above its cut point
        # (row for c1 gets band (c1, c2], etc.); the lowest band (-inf, c1]
        # ends up with no rates and its rows drop out of the inner merge in
        # process_anomaly — presumably intentional, TODO confirm
        cut_range = pd.cut(error_range['cut_point'], self.cut_points)
        cut_range = cut_range.astype(str)
        error_range = error_range.iloc[:-1, :]
        error_range['cut_range'] = cut_range[1:].tolist()
        self.error_range = error_range

    def process_history_price(self):
        """
        Predict prices for the evaluation date, compute per-row margins
        against the actual sell price, and persist the day's RMSE/MAE.

        :return: DataFrame with predict_price / margin / margin_rate columns
                 (empty when there is nothing to evaluate)
        """
        detect_data = self.data[self.data['settle_list_create_date']
                                == self.history_date].copy()
        if detect_data.empty:
            logger.info('detect data is empty, SKIP!')
            return detect_data
        # the day under evaluation is always the current period ("0")
        detect_data['period'] = '0'

        detect_x_inputs = self.ohe.transform(
            detect_data[mobile_model_config.MOBILE_FEATURES])

        predict_price = self.model.predict(detect_x_inputs)
        # map predictions back to the original price scale, rounded to int
        detect_data['predict_price'] = np.round(
            self.scaler.inverse_transform(predict_price.reshape((-1, 1))).flatten()).astype(int)
        # clamp predictions so they never fall below MAX_EVN_PRICE
        # NOTE(review): despite the "MAX" name this acts as a floor — confirm intent
        detect_data['predict_price'] = detect_data['predict_price'].where(detect_data['predict_price'] >= MAX_EVN_PRICE,
                                                                          MAX_EVN_PRICE)
        detect_data['margin'] = detect_data['predict_price'] - \
            detect_data['item_quotation_price_num']
        detect_data['margin_rate'] = np.round(
            detect_data['margin'] / detect_data['item_quotation_price_num'], 4)

        rmse, mae = np.sqrt(np.mean(np.square(detect_data['margin']))), np.mean(
            np.abs(detect_data['margin']))
        logger.info('history rmse@{:.2f} mae@{:.2f}'.format(rmse, mae))
        # REPLACE keeps the table idempotent when re-run for the same date
        sql = """
        REPLACE INTO mobile_model_history_evaluation(history_date, rmse, mae, size)
        VALUES('{}', {}, {}, {})
        """.format(self.history_date.strftime('%Y-%m-%d'), rmse, mae, detect_data.shape[0])
        mysql_prediction_processor.execute_sql(sql)

        return detect_data

    def process_anomaly(self):
        """
        Select the rows whose margin rate falls outside the error range for
        their price band and store them in self.anomaly_data.
        :return:
        """
        logger.info('processing anomaly data...')
        if self.model is None:
            logger.critical('model is None!')
            return

        detect_data = self.process_history_price()
        if detect_data.empty:
            self.anomaly_data = pd.DataFrame()
            return

        # bucket rows into price bands, then attach each band's error range
        # (inner merge: rows in a band without a configured range are dropped)
        detect_data['cut_range'] = pd.cut(
            detect_data['item_quotation_price_num'], self.cut_points)
        detect_data['cut_range'] = detect_data['cut_range'].astype(str)
        detect_data = pd.merge(detect_data, self.error_range, on='cut_range')
        # anomalous = margin rate outside [lower_rate, upper_rate]
        anomaly_data = detect_data[(detect_data['margin_rate'] > detect_data['upper_rate']) |
                                   (detect_data['margin_rate'] < detect_data['lower_rate'])].copy()
        logger.info('detecting anomaly data size@{}'.format(
            anomaly_data.shape))

        # composite key used downstream to join detection rows to documents
        anomaly_data['lookup_key'] = anomaly_data['product_sku_key'].astype(str) + \
            anomaly_data['product_level_name'] + anomaly_data['item_quotation_price_num'].astype(str) + \
            format_date_string(self.history_date)
        self.anomaly_data = anomaly_data

    def process_anomaly_reference(self):
        """
        Attach historical reference sell prices (recent / middle / old
        period) to each anomalous SKU + level, merging them back into
        self.anomaly_data.
        :return:
        """
        if self.anomaly_data is None:
            logger.critical('anomaly data is None!')
            return
        if self.anomaly_data.empty:
            logger.info('anomaly data is empty, SKIP...')
            return

        logger.info('processing anomaly reference...')

        anomaly_sku_level = self.anomaly_data[[
            'product_sku_key', 'product_level_name']].drop_duplicates()
        history_data = self.data[self.data['shop_out_date']
                                 < self.history_date].copy()
        # assign each historical row to a 14-day period index
        history_data['period'] = history_data['shop_out_date'].apply(
            cal_price_period, as_of_date=self.history_date)
        # keep only the 3 most recent periods (0, 1, 2)
        history_data = history_data[history_data['period'] <= 2]
        anomaly_history_data = pd.merge(anomaly_sku_level, history_data, on=['product_sku_key', 'product_level_name'],
                                        how='left')
        # SKU+level combinations that do have history (product_no present
        # after the left merge)
        anomaly_history_data_exist = anomaly_history_data[pd.notnull(
            anomaly_history_data['product_no'])].copy()
        if not anomaly_history_data_exist.empty:
            # reference sell price per SKU+level+period
            anomaly_reference_exist = anomaly_history_data_exist.groupby(['product_sku_key',
                                                                          'product_level_name', 'period']).apply(
                cal_previous_sell_price)
            anomaly_reference_exist = anomaly_reference_exist.to_frame().reset_index()
            anomaly_reference_exist.columns = [
                'product_sku_key', 'product_level_name', 'period', 'sell_price']

            # long -> wide: one column per period
            anomaly_reference_exist = anomaly_reference_exist.pivot_table(index=['product_sku_key',
                                                                                 'product_level_name'],
                                                                          columns='period').reset_index()
            # check whether all 3 periods are present (2 key columns + 3
            # period columns = 5)
            column_len = len(anomaly_reference_exist.columns)
            if column_len == 5:
                anomaly_reference_exist.columns = ['product_sku_key', 'product_level_name', 'old_sell_price',
                                                   'middle_sell_price', 'recent_sell_price']
            else:
                # some periods missing: name only the columns that exist
                # (pivot orders them 0, 1, 2) and fill the rest with NaN
                periods = anomaly_history_data_exist['period'].astype(
                    int).tolist()
                base_columns = ['product_sku_key', 'product_level_name']
                missing_columns = []

                if 0 in periods:
                    base_columns.append('recent_sell_price')
                else:
                    missing_columns.append('recent_sell_price')
                if 1 in periods:
                    base_columns.append('middle_sell_price')
                else:
                    missing_columns.append('middle_sell_price')
                if 2 in periods:
                    base_columns.append('old_sell_price')
                else:
                    missing_columns.append('old_sell_price')

                anomaly_reference_exist.columns = base_columns
                for col in missing_columns:
                    anomaly_reference_exist[col] = np.nan
        else:
            anomaly_reference_exist = pd.DataFrame()

        # SKU+level combinations with no history at all: NaN references
        anomaly_history_data_no_exist = anomaly_history_data[pd.isnull(
            anomaly_history_data['product_no'])].copy()
        if not anomaly_history_data_no_exist.empty:
            anomaly_reference_no_exist = anomaly_history_data_no_exist[['product_sku_key',
                                                                        'product_level_name']].drop_duplicates().copy()
            anomaly_reference_no_exist['old_sell_price'] = np.nan
            anomaly_reference_no_exist['middle_sell_price'] = np.nan
            anomaly_reference_no_exist['recent_sell_price'] = np.nan
        else:
            anomaly_reference_no_exist = pd.DataFrame()

        anomaly_reference = pd.concat([anomaly_reference_exist, anomaly_reference_no_exist],
                                      ignore_index=True, sort=False)

        self.anomaly_data = pd.merge(self.anomaly_data, anomaly_reference, on=[
                                     'product_sku_key', 'product_level_name'])

    def save_anomaly_data(self):
        """
        Persist the anomaly rows: enrich with SKU names from Presto, then
        insert the detection rows and the document-item lookup mapping.
        :return:
        """
        if self.anomaly_data is None:
            logger.critical('anomaly data is None!')
            return
        if self.anomaly_data.empty:
            logger.info('anomaly data is empty, SKIP...')
            return

        logger.info('saving anomaly data...')

        logger.info('loading sku name...')
        sku_id_list = self.anomaly_data['product_sku_key'].drop_duplicates(
        ).tolist()
        # NOTE(review): IN clause built by string formatting; values are
        # internal SKU ids, but parameterized queries would be safer
        sku_id_cond = ','.join(str(x) for x in sku_id_list)
        sku_name_sql = """
        SELECT product_sku_id AS product_sku_key, product_sku_name 
        FROM dim.dim_product_sku 
        WHERE product_sku_id in ({})""".format(sku_id_cond)
        sku_names = presto_processor.load_sql(sku_name_sql)
        self.anomaly_data = pd.merge(
            self.anomaly_data, sku_names, on='product_sku_key')

        insert_detection_sql = """
        INSERT INTO mobile_anomaly_one_day_detection(sku_id, product_sku_name, product_level_name, lookup_key, 
        old_sell_price, middle_sell_price, recent_sell_price, predict_price, sell_price, error_rate)
        VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        one_day_detection = self.anomaly_data[['product_sku_key', 'product_sku_name', 'product_level_name',
                                               'lookup_key', 'old_sell_price', 'middle_sell_price', 'recent_sell_price',
                                               'predict_price', 'item_quotation_price_num', 'margin_rate']]
        one_day_detection = one_day_detection.drop_duplicates()
        # convert NaN to None so MySQL receives NULL instead of nan
        mysql_prediction_processor.execute_insert_sql(insert_detection_sql,
                                                      one_day_detection.astype(object).where(
                                                          pd.notnull(one_day_detection), None).to_records(
                                                          index=False).tolist())
        insert_lookup_mapping_sql = """
        INSERT INTO mobile_anomaly_one_day_mapping(document_item_id, lookup_key)
        VALUES(%s, %s)
        """
        mysql_prediction_processor.execute_insert_sql(insert_lookup_mapping_sql,
                                                      self.anomaly_data[['document_item_id', 'lookup_key']].to_records(
                                                          index=False).tolist())

    def launch_anomaly_detection(self):
        """
        Run the full detection pipeline: detect anomalies, attach reference
        prices, and save the results. Errors are reported via
        sending_exception rather than raised (top-level boundary).
        :return:
        """
        if self.model is None:
            return
        try:
            self.process_anomaly()
            self.process_anomaly_reference()
            self.save_anomaly_data()
        except Exception as e:
            sending_exception(
                '检测手机偏差率大的方法launch_anomaly_detection出错：{}'.format(e))
