package com.feidee.fd.sml.algorithm.component.preprocess

import org.apache.spark.ml.feature.LibSVMParser
import org.apache.spark.ml.PipelineStage
import org.apache.spark.sql.DataFrame

/**
  * @Author songhaicheng
  * @Date 2019/6/17 15:07
  * @Description LibSVM parsing/encoding preprocessing component (splits configured
  *              input columns, parses them in LibSVM format into output columns).
  * @Reviewer
  */
/**
  * Parameter holder for [[LibSVMEncoder]].
  *
  * Inherits the common preprocessing parameters (input/output paths, hive table,
  * flow time, column names, model path, preserved columns) from
  * `PreprocessorParam` and adds `returnValue`, which is forwarded to the
  * underlying `LibSVMParser` stage.
  *
  * @param returnValue whether the parser should emit the parsed values
  *                    (passed through to `LibSVMParser.setReturnValue`)
  */
case class LibSVMEncoderParam(
                                     override val input_pt: String,
                                     override val output_pt: String,
                                     override val hive_table: String,
                                     override val flow_time: String,
                                     override val inputCol: String,
                                     override val outputCol: String,
                                     override val modelPath: String,
                                     override val preserveCols: String,
                                     returnValue: Boolean
                             ) extends PreprocessorParam {

  /** No-arg constructor used by reflective JSON/param deserialization; mirrors the defaults of sibling components. */
  def this() = this(null, null, null, null, "input", "output", null, null, false)

  // NOTE: the previous no-op `override def verify()` that only delegated to
  // super has been removed; the inherited implementation is used directly.

  /** Serializes all parameters, including the component-specific `returnValue`, into a map. */
  override def toMap: Map[String, Any] =
    super.toMap + ("returnValue" -> returnValue)
}


/**
  * Preprocessing component that assembles a single `LibSVMParser` pipeline
  * stage from the supplied parameters.
  */
class LibSVMEncoder extends AbstractPreprocessor[LibSVMEncoderParam] {

  /**
    * Builds the pipeline stages for this component.
    *
    * @param param component parameters; `inputCol`/`outputCol` are
    *              comma-separated column-name lists
    * @param data  input data frame (not consumed here; required by the
    *              `AbstractPreprocessor` contract)
    * @return a single-element array holding the configured parser stage
    */
  override def setUp(param: LibSVMEncoderParam, data: DataFrame): Array[PipelineStage] = {
    // Column parameters arrive as comma-separated strings; split them up front.
    val inputColumns = param.inputCol.split(",")
    val outputColumns = param.outputCol.split(",")

    val parserStage = new LibSVMParser()
    parserStage.setInputCols(inputColumns)
    parserStage.setOutputCols(outputColumns)
    parserStage.setReturnValue(param.returnValue)

    Array(parserStage)
  }

}

/** Companion entry point for running the encoder as a standalone job. */
object LibSVMEncoder {

  /**
    * Instantiates the encoder and runs it against the given serialized
    * parameter string.
    */
  def apply(paramStr: String): Unit =
    new LibSVMEncoder()(paramStr)

  /**
    * Job entry point; the first program argument is the serialized
    * parameter string.
    */
  def main(args: Array[String]): Unit = {
    val paramStr = args(0)
    LibSVMEncoder(paramStr)
  }
}
