package com.feidee.fd.sml.algorithm.component.feature

import org.apache.spark.ml.PipelineStage
import org.apache.spark.ml.feature.Normalizer
import org.apache.spark.sql.DataFrame

/**
  * Parameters for the [[NormalizeEncoder]] feature component, which wraps
  * Spark ML's `Normalizer` (p-norm vector normalization).
  *
  * @Author tangjinyuan
  * @Date 2019/03/21 16:33
  * @Description NormalizerEncoder feature component parameters
  * @Reviewer dongguosheng
  */
case class NormalizeEncoderParam(
                                  override val input_pt: String,
                                  override val output_pt: String,
                                  override val hive_table: String,
                                  override val flow_time: String,
                                  override val inputCol: String,
                                  override val outputCol: String,
                                  override val preserveCols: String,
                                  override val modelPath: String,
                                  // Order of the p-norm used to normalize each vector; must be >= 1, default 2 (Euclidean norm).
                                  p: Double
                                ) extends FeatureParam {

  /** Zero-arg constructor (e.g. for reflective instantiation); defaults: inputCol = "input", outputCol = "features", p = 2. */
  def this() = this(null, null, null, null, "input", "features", null, null, 2)

  /** Validates parameters, failing fast when p < 1 (Spark's Normalizer requires p >= 1). */
  override def verify(): Unit = {
    super.verify()
    require(p >= 1, "p must be greater or equal to 1!")
  }

  /** Serializes all parameters, including p, into a map (e.g. for logging/persistence). */
  override def toMap: Map[String, Any] = super.toMap + ("p" -> p)
}


/**
  * Feature encoder that normalizes feature vectors using Spark ML's
  * [[org.apache.spark.ml.feature.Normalizer]].
  */
class NormalizeEncoder extends AbstractFeatureEncoder[NormalizeEncoderParam] {

  /**
    * Builds the pipeline stages for this encoder: a single Normalizer stage
    * configured from `param`. The input DataFrame is not inspected here.
    *
    * @param param encoder parameters (input/output columns and the p-norm order)
    * @param data  input data (unused during setup)
    * @return a one-element array containing the configured Normalizer stage
    */
  override def setUp(param: NormalizeEncoderParam, data: DataFrame): Array[PipelineStage] = {
    val stage = new Normalizer()
      .setInputCol(param.inputCol)
      .setOutputCol(param.outputCol)
      .setP(param.p)
    Array(stage)
  }
}
/** Companion entry point: builds and runs a [[NormalizeEncoder]] from a serialized parameter string. */
object NormalizeEncoder {

  /** Creates an encoder and applies it to the given parameter string. */
  def apply(paramStr: String): Unit = new NormalizeEncoder()(paramStr)

  /** CLI entry; expects the serialized parameter string as args(0). */
  def main(args: Array[String]): Unit = NormalizeEncoder(args(0))
}



