package com.feidee.fd.sml.algorithm.component.feature

import org.apache.spark.ml.PipelineStage
import org.apache.spark.ml.feature.MinMaxScaler
import org.apache.spark.sql.DataFrame

/**
  * @author YongChen
  * @date 2019/3/22 3:18
  * @description
  * @reviewer dongguosheng
  */
case class MinMaxScaleEncoderParam(
                                    override val input_pt: String,
                                    override val output_pt: String,
                                    override val hive_table: String,
                                    override val flow_time: String,
                                    override val inputCol: String,
                                    override val outputCol: String,
                                    override val preserveCols: String,
                                    override val modelPath: String,
                                    // Lower bound of the rescaled range; defaults to 0.0
                                    min: Double,
                                    // Upper bound of the rescaled range; defaults to 1.0
                                    max: Double
                                  ) extends FeatureParam {

  // Zero-arg constructor supplying the documented defaults
  // (input column "input", output column "features", range [0.0, 1.0]).
  def this() = this(null, null, null, null, "input", "features", null, null, 0.0, 1.0)

  // Validates inherited fields first, then enforces a non-degenerate range.
  override def verify(): Unit = {
    super.verify()
    require(max > min, "参数 max 必须大于参数 min")
  }

  // Parent map extended with the range bounds, built as a single expression.
  override def toMap: Map[String, Any] =
    super.toMap + ("min" -> min, "max" -> max)
}


class MinMaxScaleEncoder extends AbstractFeatureEncoder[MinMaxScaleEncoderParam] {

  /**
    * Builds the pipeline stages for min-max scaling: a single
    * [[MinMaxScaler]] that rescales `param.inputCol` into the range
    * [param.min, param.max] and writes the result to `param.outputCol`.
    * The input DataFrame is not inspected here.
    */
  override def setUp(param: MinMaxScaleEncoderParam, data: DataFrame): Array[PipelineStage] = {
    val scaler = new MinMaxScaler()
      .setInputCol(param.inputCol)
      .setOutputCol(param.outputCol)
      .setMin(param.min)
      .setMax(param.max)
    Array(scaler)
  }
}
object MinMaxScaleEncoder {

  /**
    * Constructs an encoder and runs it against the serialized
    * parameter string (delegates to the inherited `apply(String)`).
    */
  def apply(paramStr: String): Unit = {
    new MinMaxScaleEncoder()(paramStr)
  }

  /**
    * Entry point.
    *
    * @param args args(0) must be the serialized parameter string;
    *             fail fast with a clear message instead of an opaque
    *             ArrayIndexOutOfBoundsException when it is missing.
    */
  def main(args: Array[String]): Unit = {
    require(args.nonEmpty, "MinMaxScaleEncoder requires the parameter string as args(0)")
    MinMaxScaleEncoder(args(0))
  }

}
