package com.feidee.fd.sml.algorithm.component.feature
import com.feidee.fdspark.transformer.NGramExtend
import org.apache.spark.ml.PipelineStage
import org.apache.spark.sql.DataFrame

/**
  * @author xiongjun
  * @date 2019/8/29 15:15
  * @description
  * @reviewer
  */
/**
  * Parameter container for [[NGramExtendEncoder]].
  *
  * Inherits the common feature-component fields (paths, hive table, columns,
  * model path) from `FeatureParam` and adds the n-gram specific settings.
  *
  * @param n     minimum n-gram length; must be >= 1. If the input column holds
  *              fewer than `n` tokens the output is a zero-length array.
  *              Default: 2.
  * @param blank flag forwarded to `NGramExtend.setBlank` — exact semantics are
  *              defined by that transformer (presumably whether blanks/spaces
  *              are handled specially; confirm against NGramExtend).
  */
case class NGramExtendEncoderParam (
                               override val input_pt: String,
                               override val output_pt: String,
                               override val hive_table: String,
                               override val flow_time: String,
                               override val inputCol: String,
                               override val outputCol: String,
                               override val preserveCols: String,
                               override val modelPath: String,
                               // Minimum n-gram length, >= 1 (if the input column has fewer
                               // than n tokens, the output array is empty). Default: 2.
                               n: Int,
                               blank: Boolean
                             ) extends FeatureParam {

  /** Zero-arg constructor for reflective / framework instantiation; supplies defaults. */
  def this() = this(null, null, null, null, "input", "features", null, null, 2, false)

  /** Validates inherited fields, then enforces the n-gram lower bound. */
  override def verify(): Unit = {
    super.verify()
    require(n >= 1, "n should be greater than or equal to 1!")
  }

  /** Serializes all parameters, appending this class's fields to the parent map. */
  override def toMap: Map[String, Any] =
    super.toMap + ("n" -> n) + ("blank" -> blank)
}
/**
  * Feature encoder that wires a single [[NGramExtend]] transformer into a
  * Spark ML pipeline, configured from an [[NGramExtendEncoderParam]].
  */
class NGramExtendEncoder extends AbstractFeatureEncoder[NGramExtendEncoderParam] {

  /**
    * Builds the pipeline stages for n-gram extension.
    *
    * @param param encoder parameters (input/output columns, blank flag, n)
    * @param data  the input data frame (not inspected by this encoder)
    * @return a single-element array holding the configured NGramExtend stage
    */
  override def setUp(param: NGramExtendEncoderParam, data: DataFrame): Array[PipelineStage] = {
    val ngramStage = new NGramExtend()
      .setInputCol(param.inputCol)
      .setOutputCol(param.outputCol)
      .setBlank(param.blank)
      .setN(param.n)
    Array[PipelineStage](ngramStage)
  }
}
/**
  * Companion entry point: constructs an [[NGramExtendEncoder]] and runs it
  * against a serialized parameter string.
  */
object NGramExtendEncoder {

  /** Instantiates a fresh encoder and applies it to the given parameter string. */
  def apply(paramStr: String): Unit = {
    val encoder = new NGramExtendEncoder()
    encoder(paramStr)
  }

  /** CLI entry point; expects the parameter string as the first argument. */
  def main(args: Array[String]): Unit =
    NGramExtendEncoder(args(0))
}
