package cn.seecoder.ai.algorithm.ml
import cn.seecoder.ai.model.bo.TrainParamsBO
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.{PipelineModel, PipelineStage}
import org.apache.spark.sql.DataFrame

import java.util

/**
 * Template algorithm for evaluating regression methods: runs the fitted
 * pipeline on the test split and reports rmse, mse, r2, mae and explained
 * variance.
 *
 * @author fanyanpeng
 * @date 2023/4/17 5:13
 */
class BaseRegression extends BaseClassification {

  /**
   * Evaluates a fitted regression pipeline on the held-out test set and
   * collects the standard regression metrics.
   *
   * @param pipelineModel        the fitted pipeline used to produce predictions
   * @param subTestSet           test split; must contain "label" and "features" columns
   * @param machineLearningStage the regression stage (unused here; part of the template signature)
   * @param trainParams          training parameters (unused here; part of the template signature)
   * @return a map from metric label to the metric value rendered as a String
   */
  override def evaluate(pipelineModel: PipelineModel, subTestSet: DataFrame, machineLearningStage: PipelineStage, trainParams: TrainParamsBO): util.Map[String, Object] = {

    // Make predictions on the test split.
    val predictions = pipelineModel.transform(subTestSet)

    // Show a few example rows for quick visual inspection in the driver log.
    predictions.select("prediction", "label", "features").show(5)

    // Select (prediction, true label) and compute the test metrics.
    val evaluator = new RegressionEvaluator()
      .setLabelCol("label")
      .setPredictionCol("prediction")

    // Supported metric names: rmse, mse, r2, mae, var (explained variance).
    // setMetricName mutates the evaluator in place, so one instance is reused
    // for every metric. Note the explicit '.' before evaluate — the original
    // relied on Scala 2 infix notation, which is fragile and non-idiomatic.
    def metric(name: String): Double =
      evaluator.setMetricName(name).evaluate(predictions)

    val resultMap: util.Map[String, Object] = new util.HashMap[String, Object]()
    // Keys intentionally keep the historical trailing ": " (and the "_var"
    // spelling) so existing consumers of this map keep working.
    resultMap.put("rmse: ", metric("rmse").toString)
    resultMap.put("mse: ", metric("mse").toString)
    resultMap.put("r2: ", metric("r2").toString)
    resultMap.put("mae: ", metric("mae").toString)
    resultMap.put("_var: ", metric("var").toString)
    resultMap
  }

}
