package com.cxk.fe

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession, functions}

import scala.collection.mutable

object MeanEncoder {

  /**
    * Builds the prior-probability weight function lambda(n) = 1 / (1 + exp((n - k) / f)).
    *
    * @param k inflection point: when n = k, lambda = 0.5 and the prior and posterior
    *          probabilities are weighted equally; when n > k, lambda < 0.5.
    * @param f controls the slope around the inflection point; larger f gives a gentler slope.
    * @return the prior-probability weight λ as a function of the sample count n
    */
  def prior_weight_func(k: Double)(f: Double): Long => Double = (n: Long) => {
    // Math.exp is the idiomatic (and cheaper) form of Math.pow(Math.E, x).
    val pow = Math.exp((n - k) / f)
    // Overflow guard: exp(...) can only reach +Infinity here (its result is >= 0),
    // so test isInfinity directly instead of the fragile toString comparison.
    if (pow.isInfinity) 0d else 1d / (1d + pow)
  }

  /**
    * Builds λ(n) = 1 / (1 + exp(g(n))) where the exponent g is piecewise in the
    * distance between n and the mean.
    *
    * @param avg     mean (expected > 0): centre of the sigmoid
    * @param std_dev standard deviation (> 1 to activate the piecewise slope): controls
    *                the slope near the inflection point — gentle near the mean,
    *                steeper further away
    * @return prior-probability weight λ as a function of the sample count n,
    *         of the form lambda(n) = 1 / (1 + exp(g(n)))
    */
  def priorWeightFunc(avg: Double, std_dev: Double): Long => Double = (x: Long) => {
    val exp = if (std_dev <= 1) x - avg else {
      val _d1 = 1.5 * std_dev // beyond 1.5 std-devs from the mean, decay speeds up
      val _d2 = 3 * std_dev   // beyond 3 std-devs, λ decays rapidly to 0 or 1

      if (x > avg - _d1 && x < avg + _d1) {
        // Inner zone: gentle slope, scaled down by the standard deviation.
        (x - avg) / std_dev
      } else if (x > avg - _d2 && x < avg + _d2) {
        // Middle zone: faster decay with an offset term.
        // NOTE(review): sig is +1 below the mean and -1 above, which looks inverted
        // relative to the outer zone — confirm the piecewise curve is continuous.
        val sig = if (x < avg - _d1) 1 else -1
        x - avg + sig * (_d1 - 1.5)
      } else {
        // Outer zone: steepest decay, driving λ quickly to 0 or 1.
        val sig = if (x < avg - _d2) -1 else 1
        std_dev * x + sig * _d1 - (avg + sig * _d2) * std_dev + 1.5 * sig
      }
    }

    // Math.exp replaces Math.pow(Math.E, _); isInfinity replaces the fragile
    // toString-based overflow check (only +Infinity is reachable since exp >= 0).
    val pow = Math.exp(exp)
    if (pow.isInfinity) 0d else 1d / (1d + pow)
  }

  /**
    * Factory for [[MeanEncoder]]; equivalent to invoking the class constructor directly.
    *
    * @param spark                session handle
    * @param categorical_features names of the categorical features to encode
    * @param prior_weight_func    weighting function λ(n) blending prior and posterior
    * @param n_splits             number of folds for K-fold encoding (default 5)
    * @return a fresh, unfitted encoder
    */
  def apply(spark: SparkSession, categorical_features: Seq[String], prior_weight_func: Long => Double, n_splits: Int = 5): MeanEncoder =
    new MeanEncoder(
      spark = spark,
      categorical_features = categorical_features,
      prior_weight_func = prior_weight_func,
      n_splits = n_splits)

  /**
    * Transforms a dataset using the state learned from the training set.
    *
    * @param data                 data to transform
    * @param categorical_features categorical features to encode
    * @param learned_stats        learned mean-code DataFrame for each feature value
    * @param priorStats           prior probability for each target-value column
    * @param target_values        distinct values of the target variable
    * @return the transformed data
    */
  def transformByLearnedStats(data: DataFrame, categorical_features: Seq[String], learned_stats: Map[String, DataFrame], priorStats: Map[String, Double], target_values: Seq[Any]): DataFrame = {
    // BUGFIX: chain one left join per feature onto the accumulated result.
    // The original joined onto `data` each iteration, so only the LAST
    // feature's encoding columns survived.
    var result: DataFrame = data
    for (variable <- categorical_features; df <- learned_stats.get(variable)) {
      result = result.join(df, Seq(variable), "left")
    }
    // BUGFIX: the original called `updated` on an immutable Map and discarded the
    // returned map, so na.fill received an always-empty map (and would have stored
    // an Option[Double] rather than a Double). Build the fill map properly:
    // category values unseen at fit time yield nulls after the left join and are
    // filled with the learned prior for each "<feature>_predict_<target>" column.
    val fillValues: Map[String, Any] = (for {
      variable <- categorical_features
      i <- 1 until target_values.length
      nfName = variable + "_predict_" + target_values(i)
      prior <- priorStats.get(nfName)
    } yield nfName -> prior).toMap
    result.na.fill(fillValues)
  }

  /**
    * Supervised (target/mean) encoder for categorical features: within a Bayesian
    * framing, uses the target variable to learn the best numeric encoding for each
    * categorical value — a shrinkage blend of the prior P(y) and the per-category
    * posterior P(y | category).
    *
    * @param spark                session handle; needed for broadcasting and DataFrame creation
    * @param categorical_features names of the categorical features to encode
    * @param prior_weight_func    weighting function λ(n) blending prior and posterior probabilities
    * @param n_splits             number of folds; K-fold encoding is used to reduce overfitting
    */
  class MeanEncoder(val spark: SparkSession, val categorical_features: Seq[String], prior_weight_func: Long => Double, val n_splits: Int = 5) {

    // Learned encodings: feature name -> DataFrame of (value, "<feature>_predict_<target>" columns).
    private[this] val learned_stats = mutable.Map[String, DataFrame]()
    // Prior probability keyed by "<feature>_predict_<target>" column name.
    private[this] val learnedStats = mutable.Map[String, Double]()
    // Distinct target values observed during fit (ascending order); set by fit().
    private[this] var target_values: Array[Any] = _

    /**
      * Encodings learned for each categorical value.
      *
      * @return key: feature name, value: DataFrame holding the encoded values
      */
    def mean_code: Seq[(String, DataFrame)] = learned_stats.toSeq

    /**
      * @return prior probability of each target value
      */
    def prior: Seq[(String, Double)] = learnedStats.toSeq

    /**
      * Fits the encoder.
      *
      * @param data       training set; must contain every @categorical_feature column and the label column
      * @param label_name name of the label (target) column
      * @return this encoder, fitted
      */
    def fit(data: DataFrame, label_name: String): MeanEncoder = {
      target_values = data.select(label_name).distinct().orderBy(label_name).rdd.map(row => row.get(0)).collect()
      // Broadcast the weight function so executors reuse one copy per node.
      val priorWeightFunc: Broadcast[Long => Double] = spark.sparkContext.broadcast(prior_weight_func)
      for (variable <- categorical_features) {
        var predict_df = data.select(variable).distinct()
        // Index starts at 1: the first (smallest) target value is the implicit
        // reference class and gets no encoding column of its own.
        for (i <- 1 until target_values.length) {
          val df = fit(data.select(variable, label_name), variable, (label_name, target_values(i)), priorWeightFunc)
          predict_df = predict_df.join(df, Seq(variable))
        }
        learned_stats.update(variable, predict_df)
      }
      this
    }

    /**
      * Fits the encoder and transforms the same data.
      *
      * @param data       training set; must contain every @categorical_feature column and the label column
      * @param label_name name of the label (target) column
      * @return the encoded data
      */
    def fit_transform(data: DataFrame, label_name: String): DataFrame = fit(data, label_name).transform(data)

    /**
      * Encodes a dataset with the previously learned statistics.
      *
      * @param data dataset to encode
      * @return the encoded data
      */
    def transform(data: DataFrame): DataFrame = transformByLearnedStats(data, categorical_features, learned_stats.toMap, learnedStats.toMap, target_values.toSeq)

    // K-fold cross training: each fold is encoded from the statistics of the other
    // folds, then the per-category codes are averaged over the folds; the running
    // mean of the fold priors is stored for use as the fill value at transform time.
    private[this] def fit(data: DataFrame, variable: String, target: (String, Any), priorWeightFunc: Broadcast[Long => Double]): DataFrame = {
      val key = variable + "_predict_" + target._2
      var fit_df: DataFrame = null
      var preAvg = 0d

      if (n_splits > 1) {
        // Split into n_splits equal-weight random folds.
        val weights = (1 to n_splits).map(_ => 1d / n_splits).toArray
        val data_split = data.randomSplit(weights)
        // Incremental running mean: fold i's prior averaged into the previous i folds.
        val avg = (current: Double, i: Int, preAvg: Double) => (i * preAvg + current) / (i + 1)

        for (i <- 0 until n_splits) {
          // Train on every fold except fold i, then encode fold i.
          var train: DataFrame = null
          for (j <- data_split.indices if i != j) {
            train = if (train == null) data_split(j) else train.unionByName(data_split(j))
          }
          val (prior, df) = mean_encode_subroutine(train, data_split(i), variable, target, priorWeightFunc, key)
          fit_df = if (fit_df == null) df else fit_df.unionByName(df)
          preAvg = avg(prior, i, preAvg)
        }
      } else {
        // No cross-validation: encode directly from the full training set.
        val (prior, df) = mean_encode_subroutine(data, null, variable, target, priorWeightFunc, key)
        fit_df = df
        preAvg = prior
      }
      learnedStats.update(key, preAvg)
      // Average the per-fold codes into one code per category value.
      fit_df.groupBy(variable).agg(functions.avg(key).alias(key))
    }

    // Shrinkage blend: P = λ·P(y=target) + (1-λ)·P(y=target | variable=k),
    // where λ = priorWeightFunc(count of category k in the training fold).
    private[this] def mean_encode_subroutine(train: DataFrame, test: DataFrame, variable: String, target: (String, Any), priorWeightFunc: Broadcast[Long => Double], mean_code_key: String): (Double, DataFrame) = {
      // NOTE(review): the CASE expression is built by string concatenation; a
      // string-typed target value would need quoting here — confirm target types.
      val training = train.selectExpr(variable, "CASE WHEN " + target._1 + "=" + target._2 + " THEN 1 ELSE 0 END AS predict_temp")
      // Prior: overall fraction of rows matching the target value in this fold.
      val prior = training.selectExpr("AVG(predict_temp)").first().getDouble(0)
      val rowRDD = training.select(variable, "predict_temp").groupBy(variable)
        .agg(functions.mean("predict_temp"), functions.count("predict_temp"))
        .rdd.map(row => {
        // row: (category value, posterior mean, category count).
        val lambda = priorWeightFunc.value(row.getLong(2))
        val p = lambda * prior + (1 - lambda) * row.getDouble(1)
        Row(row.get(0), p)
      })

      // NOTE(review): the schema hard-codes StringType for the category column;
      // a non-string categorical feature would fail here — confirm feature types.
      val p_df = spark.createDataFrame(rowRDD, StructType(List(StructField(variable, StringType), StructField(mean_code_key, DoubleType))))

      // Encode the held-out fold; categories unseen in training fall back to the prior.
      val df = if (test == null) p_df else test.select(variable).join(p_df, Seq(variable), "left").na.fill(prior)
      (prior, df)
    }
  }

}
