package com.feidee.fd.sml.algorithm.component.feature

import org.apache.spark.ml.PipelineStage
import org.apache.spark.ml.feature.Bucketizer
import org.apache.spark.sql.DataFrame

/**
  * @author YongChen
  * @date 2019/3/22 3:18
  * @description
  * @reviewer songhaicheng
  */
case class BucketEncoderParam(
                               override val input_pt: String,
                               override val output_pt: String,
                               override val hive_table: String,
                               override val flow_time: String,
                               override val inputCol: String,
                               override val outputCol: String,
                               override val preserveCols: String,
                               override val modelPath: String,
                               // Split points used to discretize the continuous feature.
                               // Must have length >= 3 and be strictly increasing
                               // (Spark's Bucketizer.setSplits requires strictly increasing values).
                               splits: Array[Double],
                               // How to handle invalid entries; one of [skip, error, keep]. Default: error.
                               handleInvalid: String
                               // parent: Estimator[Bucketizer] — not supported yet
                             ) extends FeatureParam {

  // No-arg constructor with defaults; null fields follow the project's convention
  // of being filled in later from the serialized parameter string.
  def this() = this(null, null, null, null, "input", "features", null, null , Array.empty[Double] , "error")

  /**
    * Validates the parameters, throwing IllegalArgumentException on violation:
    * splits must be non-null, of length >= 3 and strictly increasing;
    * handleInvalid must be one of [skip, error, keep] (case-insensitive).
    */
  override def verify(): Unit = {
    super.verify()
    // Guard against null before reading length (nullable by this() convention).
    require(splits != null && splits.length >= 3 && increaseArray(splits), "参数 splits 长度必须大于或等于 3，且是一个递增数组")
    val handles = Array("skip", "error", "keep")
    require(handles.contains(handleInvalid.toLowerCase), s"param handleInvalid only accepts " +
      s"${handles.mkString("[", ", ", "]")}, but has $handleInvalid")
  }

  /**
    * Returns true iff `split` is strictly increasing (empty/single-element arrays
    * are vacuously increasing; verify() separately enforces length >= 3).
    *
    * BUG FIX: the previous implementation evaluated the bare literal `false`
    * inside the loop without returning it (a discarded expression in Scala),
    * so the method always returned true and the ordering check in verify()
    * could never fail. It also used `<`, which would have allowed equal
    * adjacent values, whereas Bucketizer requires strictly increasing splits.
    */
  def increaseArray(split : Array[Double]): Boolean =
    (1 until split.length).forall(i => split(i) > split(i - 1))

  /** Serializes all parameters (including the superclass's) into a Map. */
  override def toMap: Map[String, Any] = {
    var map = super.toMap
    map += ("splits" -> splits)
    map += ("handleInvalid" -> handleInvalid)
    map
  }
}


class BucketEncoder extends AbstractFeatureEncoder[BucketEncoderParam] {

  /**
    * Builds the pipeline stages for this encoder: a single Bucketizer
    * configured from `param` (input/output columns, invalid-entry policy,
    * and the split points).
    *
    * @param param validated bucketizer parameters
    * @param data  input DataFrame (not inspected here; configuration is purely parameter-driven)
    * @return a one-element array holding the configured Bucketizer stage
    */
  override def setUp(param: BucketEncoderParam, data: DataFrame): Array[PipelineStage] = {
    val bucketizer = new Bucketizer()
    bucketizer.setInputCol(param.inputCol)
    bucketizer.setOutputCol(param.outputCol)
    bucketizer.setHandleInvalid(param.handleInvalid)
    bucketizer.setSplits(param.splits)
    // setParent (Estimator[Bucketizer]) is intentionally not wired up — not supported yet.
    Array[PipelineStage](bucketizer)
  }
}


object BucketEncoder {

  /**
    * Convenience factory/runner: creates a BucketEncoder and invokes it
    * with the serialized parameter string.
    */
  def apply(paramStr: String): Unit = {
    val encoder = new BucketEncoder()
    encoder(paramStr)
  }

  /** CLI entry point; args(0) is the serialized parameter string. */
  def main(args: Array[String]): Unit = BucketEncoder(args(0))
}