package com.feidee.fd.sml.algorithm.component.ml.regression

import com.feidee.fd.sml.algorithm.component.ml.{AbstractMLComponent, MLParam}
import com.feidee.fdspark.transformer.{MetaStorage, ModelType}
import org.apache.spark.SparkException
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.sql.DataFrame

/**
  * Base class for regression algorithm components: validates input columns,
  * records training-parameter metadata, fits the concrete estimator supplied
  * by `setUp`, and computes standard regression metrics.
  *
  * @Author songhaicheng
  * @Date 2019/3/26 16:35
  * @Description Abstract parent for Spark ML regression components
  * @Reviewer
  */
abstract class AbstractRegressionComponent[A <: MLParam](implicit m:Manifest[A])
  extends AbstractMLComponent[A] with Serializable {

  // Metric names accepted by Spark's RegressionEvaluator.setMetricName.
  private val SupportedMetrics = Set("mse", "rmse", "r2", "mae")

  /**
    * Trains a regression model as a two-stage pipeline:
    * metadata storage followed by the estimator built from `param`.
    *
    * @param param algorithm run parameters (feature/label column names, etc.)
    * @param data  training DataFrame
    * @return      fitted pipeline model
    * @throws SparkException if the feature or label column is missing from `data`
    */
  override def train(param: A, data: DataFrame): PipelineModel = {
    val fieldNames = data.schema.fieldNames

    // Fail fast with a clear message instead of erroring deep inside Pipeline.fit.
    // `role` is the Chinese column-role label; the message bytes match the
    // original exception text exactly.
    def requireColumn(role: String, col: String): Unit =
      if (!fieldNames.contains(col)) {
        throw new SparkException(s"$role $col 不存在训练数据列 ${fieldNames.mkString("[", ", ", "]")} 中")
      }
    requireColumn("特征列", param.featuresCol)
    requireColumn("标签列", param.labelCol)

    // Persist the training-parameter metadata as the first pipeline stage so it
    // travels with the saved model.
    val meta = new MetaStorage()
      .setModelType(ModelType.Algorithm_Regression)
      .setParameters(param.toMap)
      .setFields(fieldNames)

    // Pipeline: store metadata, then fit the concrete estimator.
    new Pipeline()
      .setStages(Array(meta, setUp(param)))
      .fit(data)
  }

  /**
    * Computes the requested regression metrics over `data`.
    *
    * @param param  training-time parameters (label/prediction column names and
    *               the list of requested metric names)
    * @param model  trained regression model (kept for interface compatibility;
    *               not consulted here — `data` must already contain predictions)
    * @param data   DataFrame used for evaluation
    * @return       map from requested metric name (original casing) to its value;
    *               unrecognized metric names are logged and omitted
    */
  override def calculateMetrics(param: A, model: PipelineModel, data: DataFrame): Map[String, Double] = {
    val evaluator = new RegressionEvaluator()
        .setLabelCol(param.labelCol)
        .setPredictionCol(param.predictionCol)

    // Every supported metric shares the same evaluation path, so dispatch by
    // name rather than duplicating a case arm per metric. On duplicate metric
    // names the last evaluation wins, matching the original Map-accumulation.
    param.metrics.flatMap { metric =>
      val name = metric.toLowerCase
      if (SupportedMetrics.contains(name)) {
        evaluator.setMetricName(name)
        Some(metric -> evaluator.evaluate(data))
      } else {
        logInfo(s"UNKNOWN METRIC $metric")
        None
      }
    }.toMap
  }

}
