package com.feidee.fd.sml.algorithm.component.validation.assertion

import com.feidee.fd.sml.algorithm.component.validation.ValidationParam
import com.feidee.fd.sml.algorithm.forecast.{SparkForecast, StageFinder}
import com.feidee.fd.sml.algorithm.util.Constants
import com.feidee.fdspark.transformer.MetaStorage
import org.apache.spark.SparkException
import org.apache.spark.ml.evaluation.{BinaryClassificationEvaluator, Evaluator, MulticlassClassificationEvaluator, RegressionEvaluator}
import org.apache.spark.ml.feature.StringIndexerModel
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.{DoubleType, StringType}

/**
  * @Author songhaicheng
  * @Date 2019/5/13 11:30
  * @Description Asserts that a model evaluation metric (accuracy, auc, rmse, ...)
  *              measured on the given prediction data satisfies the configured
  *              relation against the threshold, e.g. `accuracy >= 0.9`.
  * @Reviewer
  */
case class MetricAssertion(
                            override val op: String,
                            override val threshold: Double,
                            metric: String) extends Assertion {

  // Default: exact-equality check of accuracy against 0.0.
  def this() = this("=", 0.0, "accuracy")

  val availableMetrics = Array("accuracy", "precision", "recall", "f1", "auc", "pr", "mse", "rmse", "r2", "mae")

  override def assert(data: DataFrame, validationParam: ValidationParam): Boolean = {
    val forecast = new SparkForecast(data.sparkSession, null, validationParam.sparkModels: _*)
    // Pull the column names recorded in the trained pipeline's metadata stage.
    val meta = new StageFinder[MetaStorage](forecast.models.last).findWithOrder().get
    val labelCol = meta.getParameters("labelCol").toString
    val predictionCol = meta.getParameters("predictionCol").toString

    // Evaluators need numeric (Double) label/prediction columns. If either column
    // is not Double, re-index both through the StringIndexer dictionary that was
    // fitted during training.
    val alreadyNumeric =
      data.schema(labelCol).dataType.isInstanceOf[DoubleType] &&
        data.schema(predictionCol).dataType.isInstanceOf[DoubleType]
    val evalData =
      if (alreadyNumeric) {
        data
      } else {
        val labelDict = new StageFinder[StringIndexerModel](forecast.models.last).findWithOrder().get.labels
        import org.apache.spark.sql.functions.udf
        // Map a raw label string to its dictionary position as Double.
        // NOTE(review): a value absent from the dictionary maps to -1.0 — confirm intended.
        val toIndex = udf((raw: String) => labelDict.indexOf(raw).toDouble)
        data
          .withColumn(labelCol, toIndex(data(labelCol).cast(StringType)))
          .withColumn(predictionCol, toIndex(data(predictionCol).cast(StringType)))
      }

    // Pick the evaluator matching the requested metric family.
    val evaluator: Evaluator = metric.toLowerCase match {
      case "accuracy" | "precision" | "recall" | "f1" =>
        // Multiclass metrics compare the prediction column to the label column.
        new MulticlassClassificationEvaluator()
          .setLabelCol(labelCol)
          .setPredictionCol(predictionCol)
          .setMetricName(Constants.classificationMetricNames(metric.toLowerCase))
      case "auc" | "pr" =>
        // Binary metrics need the probability column recorded at training time.
        require(meta.getParameters.get("probabilityCol").nonEmpty,
          "unsupported binary metric [auc and pr] since lacking of probabilityCol")
        new BinaryClassificationEvaluator()
          .setLabelCol(labelCol)
          .setRawPredictionCol(meta.getParameters("probabilityCol").toString)
          .setMetricName(Constants.classificationMetricNames(metric.toLowerCase))
      case "mse" | "rmse" | "r2" | "mae" =>
        new RegressionEvaluator()
          .setLabelCol(labelCol)
          .setPredictionCol(predictionCol)
          .setMetricName(metric.toLowerCase)
      case _ => throw new SparkException(s"暂只支持 [${availableMetrics.mkString(", ")}] 指标，但设置的是 $metric")
    }
    val actual = evaluator.evaluate(evalData)

    // Compare the measured value against the threshold with the configured operator.
    // NOTE(review): "=" / "!=" are exact Double comparisons by design.
    val passed = op.trim match {
      case ">"  => actual > threshold
      case ">=" => actual >= threshold
      case "<"  => actual < threshold
      case "<=" => actual <= threshold
      case "="  => actual == threshold
      case "!=" => actual != threshold
      case _ => throw new SparkException(s"暂只支持 [>, >=, <, <=, =, !=] 关系符，但设置的是 $op")
    }
    if (passed) {
      logInfo(s"校验 ${metric.trim} →_→ $actual（实际）${op.trim} $threshold（阈值）通过")
    } else {
      logError(s"校验 ${metric.trim} →_→ $actual（实际）${op.trim} $threshold（阈值）不通过")
    }
    passed
  }
}
