package com.feidee.fd.sml.algorithm.component.feature

import org.apache.spark.ml.PipelineStage
import org.apache.spark.ml.feature.RegexTokenizer
import org.apache.spark.sql.DataFrame

/**
  * @Author JunxinWang
  * @Date 2019/3/22 16:31
  * @Description Tokenizer (regex-based word segmentation)
  * @Reviewer YongChen
  */
case class TokenizeEncoderParam(
                                 override val input_pt: String,
                                 override val output_pt: String,
                                 override val hive_table: String,
                                 override val flow_time: String,
                                 override val inputCol: String,
                                 override val outputCol: String,
                                 override val preserveCols: String,
                                 override val modelPath: String,
                                 // Whether the regex splits on gaps (true) or matches tokens (false). Default: false
                                 gaps: Boolean,
                                 // Minimum token length, >= 0; sentences with fewer tokens are not processed. Default: 1, to avoid returning empty strings
                                 minTokenLength: Int,
                                 // Regex pattern matching delimiters (when gaps is true) or tokens (when gaps is false). Default: "\\w+"
                                 pattern: String,
                                 // Whether to lowercase all characters before tokenization. Default: true
                                 toLowercase: Boolean
                               ) extends FeatureParam {

  /** Zero-arg constructor supplying the documented default values. */
  def this() = this(null, null, null, null, "input", "features", null, null, false, 1, "\\w+", true)

  /** Validates tokenizer-specific parameters in addition to the base checks. */
  override def verify(): Unit = {
    super.verify()
    require(minTokenLength >= 0, "Minimum token length, greater than or equal to 0. Default: 1, to avoid returning empty strings")
    require(tool.isNotNull(pattern), "Regex pattern used to match delimiters if gaps is true or tokens if gaps is false")
  }

  /** Serializes all parameters by merging the tokenizer fields into the base map. */
  override def toMap: Map[String, Any] =
    super.toMap ++ Map(
      "gaps" -> gaps,
      "minTokenLength" -> minTokenLength,
      "pattern" -> pattern,
      "toLowercase" -> toLowercase
    )
}


class TokenizeEncoder extends AbstractFeatureEncoder[TokenizeEncoderParam] {

  /**
    * Builds the pipeline stage for regex-based tokenization.
    *
    * @param param tokenizer configuration (columns, pattern, gaps, lengths, casing)
    * @param data  input data frame (not inspected here; stages are configured from param only)
    * @return a single-element array containing the configured RegexTokenizer
    */
  override def setUp(param: TokenizeEncoderParam, data: DataFrame): Array[PipelineStage] = {
    // RegexTokenizer setters mutate the stage and return it, so plain statements
    // are equivalent to the fluent chained form.
    val tokenizer = new RegexTokenizer()
    tokenizer.setInputCol(param.inputCol)
    tokenizer.setOutputCol(param.outputCol)
    tokenizer.setPattern(param.pattern)
    tokenizer.setGaps(param.gaps)
    tokenizer.setMinTokenLength(param.minTokenLength)
    tokenizer.setToLowercase(param.toLowercase)
    Array(tokenizer)
  }

}

object TokenizeEncoder {

  /** Runs the encoder against a serialized parameter string. */
  def apply(paramStr: String): Unit = new TokenizeEncoder()(paramStr)

  /** Program entry point; expects the parameter string as args(0). */
  def main(args: Array[String]): Unit = TokenizeEncoder(args(0))

}
