package com.feidee.fd.sml.algorithm.component.ml.regression

import com.feidee.fd.sml.algorithm.component.ml.MLParam
import org.apache.spark.ml.PipelineStage
import org.apache.spark.ml.regression.RandomForestRegressor

/**
  * @Author JunxinWang, songhaicheng
  * @Date 2019/3/22 15:26
  * @Description Random forest regression algorithm (随机森林回归)
  * @Reviewer YongChen
  */
/**
  * Parameter holder for the random-forest regression component.
  * Field semantics mirror Spark ML's `RandomForestRegressor` params;
  * `verify()` re-implements Spark's validity checks so bad configs fail
  * fast, before a Spark job is launched.
  */
case class RFRegressionParam(
                              override val input_pt: String,
                              override val output_pt: String,
                              override val hive_table: String,
                              override val flow_time: String,
                              override val modelPath: String,
                              override var labelCol: String,
                              override val featuresCol: String,
                              override var predictionCol: String,
                              override val metrics: Array[String],
                              // Checkpoint interval (iterations between checkpoints). Must be >= 1,
                              // or -1 to disable checkpointing. Default: 10
                              checkpointInterval: Int,
                              // Criterion for information-gain calculation. Default: "variance"
                              impurity: String,
                              // Number of features considered for splits at each tree node. Supports
                              // "auto", "all", "onethird", "sqrt", "log2", an integer "n" (n >= 1),
                              // or a fraction "n" in (0, 1]. Default: "auto"
                              featureSubsetStrategy: String,
                              // Max number of bins for discretizing continuous features and choosing
                              // splits at each node. Must be >= 2. Default: 32
                              maxBins: Int,
                              // Maximum tree depth. Must be >= 0. Depth n gives up to 2^(n+1) - 1
                              // leaf nodes. Default: 5
                              maxDepth: Int,
                              // Minimum information gain for a split to be considered at a tree node.
                              // Must be >= 0.0. Default: 0.0
                              minInfoGain: Double,
                              // Minimum number of instances each child must have after a split.
                              // Must be >= 1. Default: 1
                              minInstancesPerNode: Int,
                              // Number of trees to train. Must be >= 1. Default: 20
                              numTrees: Int,
                              // Random seed. Default: 10
                              seed: Long,
                              // Fraction of the training data used for learning each tree,
                              // in range (0, 1]. Default: 1.0
                              subsamplingRate: Double,
                              // Whether to cache node IDs for each instance. Default: false
                              cacheNodeIds: Boolean,
                              // Max memory (MB) allocated to histogram aggregation. Default: 256 MB
                              maxMemoryInMB: Int
                            ) extends MLParam {
  /**
    * Secondary constructor supplying the documented default for every parameter.
    * @return a param object populated with defaults
    */
  def this() = this(null, null, null, null, null,"label", "features", "prediction", new Array[String](0),
    10, "variance","auto", 32, 5, 0.0, 1,20 ,10, 1.0, false, 256)


  override def verify(): Unit = {
    super.verify()
    // Spark also accepts a numeric strategy: an integer >= 1 (feature count) or a
    // fraction in (0, 1] (feature proportion) — see RandomForestParams. Accept both
    // the named strategies and the numeric forms the L-comment above documents.
    val namedStrategy = Array("auto", "all", "onethird", "sqrt", "log2")
      .contains(featureSubsetStrategy.toLowerCase)
    val integerStrategy = featureSubsetStrategy.matches("^[1-9]\\d*$")
    val fractionStrategy = featureSubsetStrategy.matches("^(0?\\.\\d*[1-9]\\d*|1\\.0+)$")
    require(namedStrategy || integerStrategy || fractionStrategy,
      s"param featureSubsetStrategy only accepts[auto, all, onethird, sqrt, log2," +
        s" integer n >= 1, fraction in (0, 1]], but has $featureSubsetStrategy")
    require(Array("variance","gini", "entropy").contains(impurity),
      s"param impurity only accepts[variance,gini, entropy], but has $impurity")
    require(numTrees >= 1, "param numTrees must be not less than 1")
    require(maxDepth >= 0, "param maxDepth can't be negative")
    require(maxBins >= 2, "param maxBins must be not less than 2")
    // Fixed copy-paste bug: message previously referred to numTrees.
    require(minInstancesPerNode >= 1, "param minInstancesPerNode must be not less than 1")
    require(checkpointInterval == -1 || checkpointInterval >= 1, "param checkpointInterval must be" +
      " equals to -1 or not less than 1")
    require(minInfoGain >= 0, "param minInfoGain can't be negative")
    require(subsamplingRate > 0 && subsamplingRate <= 1, "param subsamplingRate's range is (0, 1]")
  }

  /** Flattens every algorithm parameter into the map consumed by the training driver. */
  override def toMap: Map[String, Any] = {
    var map = super.toMap
    map += ("checkpointInterval" -> checkpointInterval)
    map += ("impurity" -> impurity)
    map += ("featureSubsetStrategy" -> featureSubsetStrategy)
    map += ("maxBins" -> maxBins)
    map += ("maxDepth" -> maxDepth)
    map += ("minInfoGain" -> minInfoGain)
    map += ("minInstancesPerNode" -> minInstancesPerNode)
    map += ("numTrees" -> numTrees)
    map += ("seed" -> seed)
    map += ("subsamplingRate" -> subsamplingRate)
    map += ("cacheNodeIds" -> cacheNodeIds)
    map += ("maxMemoryInMB" -> maxMemoryInMB)
    map
  }
}


class RFRegressionComponent extends AbstractRegressionComponent[RFRegressionParam] {

  /**
    * Builds the pipeline stage for random-forest regression, copying every
    * tuning knob from the (already verified) parameter object onto the estimator.
    *
    * @param param validated algorithm parameters
    * @return a configured [[RandomForestRegressor]] ready for pipeline fitting
    */
  override def setUp(param: RFRegressionParam): PipelineStage = {
    val regressor = new RandomForestRegressor()

    // Column wiring
    regressor
      .setLabelCol(param.labelCol)
      .setFeaturesCol(param.featuresCol)
      .setPredictionCol(param.predictionCol)

    // Forest / tree structure
    regressor
      .setNumTrees(param.numTrees)
      .setMaxDepth(param.maxDepth)
      .setMaxBins(param.maxBins)
      .setMinInfoGain(param.minInfoGain)
      .setMinInstancesPerNode(param.minInstancesPerNode)
      .setImpurity(param.impurity)
      .setFeatureSubsetStrategy(param.featureSubsetStrategy)

    // Sampling and execution tuning
    regressor
      .setSubsamplingRate(param.subsamplingRate)
      .setSeed(param.seed)
      .setCheckpointInterval(param.checkpointInterval)
      .setCacheNodeIds(param.cacheNodeIds)
      .setMaxMemoryInMB(param.maxMemoryInMB)

    regressor
  }

}

object RFRegressionComponent {

  /**
    * Runs the component against a serialized parameter string.
    * @param paramStr serialized [[RFRegressionParam]] — presumably JSON; see AbstractRegressionComponent
    */
  def apply(paramStr: String): Unit = {
    new RFRegressionComponent()(paramStr)
  }

  /**
    * Entry point. Expects the serialized parameter string as the first argument;
    * fails fast with a usage message instead of an ArrayIndexOutOfBoundsException
    * when launched without arguments.
    */
  def main(args: Array[String]): Unit = {
    require(args.nonEmpty, "usage: RFRegressionComponent <paramStr>")
    RFRegressionComponent(args(0))
  }

}