package com.feidee.fd.sml.algorithm.component.preprocess

import com.feidee.fdspark.transformer.ColAssembler
import org.apache.spark.ml.PipelineStage
import org.apache.spark.sql.DataFrame

/**
  * @Author: xiongjun
  * @Date: 2019/4/24 16:26
  */
class AssembleCol extends AbstractPreprocessor[AssembleColParam] {

  /**
    * Builds the pipeline stages for this preprocessor: a single [[ColAssembler]]
    * that concatenates the configured input columns into one output column.
    *
    * @param param configuration holding the comma-separated input column list,
    *              the output column name and the join delimiter
    * @param data  the input DataFrame (not inspected here; the stage is configured
    *              purely from `param`)
    * @return a one-element array containing the configured assembler stage
    */
  override def setUp(param: AssembleColParam, data: DataFrame): Array[PipelineStage] = {
    val inputCols = param.inputCol.split(",")
    val assembler = new ColAssembler()
      .setDelimiter(param.delimiter)
      .setOutputCol(param.outputCol)
      .setCols(inputCols)
    Array(assembler)
  }

}

object AssembleCol {

  /**
    * Runs the preprocessor with a serialized parameter string by delegating to
    * the instance's `apply` (inherited from `AbstractPreprocessor`).
    *
    * @param paramStr the serialized [[AssembleColParam]] configuration
    */
  def apply(paramStr: String): Unit = {
    new AssembleCol()(paramStr)
  }

  /**
    * Command-line entry point. Expects exactly one argument: the parameter string.
    *
    * @param args program arguments; `args(0)` is the serialized configuration
    * @throws IllegalArgumentException if no argument is supplied
    */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of a bare ArrayIndexOutOfBoundsException.
    require(args.nonEmpty, "usage: AssembleCol <paramStr> -- missing parameter string")
    AssembleCol(args(0))
  }
}

/**
  * Parameters for [[AssembleCol]]. Extends the shared [[PreprocessorParam]] fields
  * with the delimiter used when joining the assembled columns.
  *
  * @param inputCol  comma-separated list of input column names
  * @param outputCol name of the assembled output column
  * @param delimiter separator placed between column values in the output
  */
case class AssembleColParam(override val input_pt: String,
                            override val output_pt: String,
                            override val hive_table: String,
                            override val flow_time: String,
                            override val inputCol: String,
                            override val outputCol: String,
                            override val modelPath: String,
                            override val preserveCols: String,
                            delimiter: String) extends PreprocessorParam {
  // No-arg constructor with placeholder values; presumably required for
  // reflective/deserialization instantiation — TODO confirm against the framework.
  def this() = this(null, null, null, null, "input", "output", null, null, null)

  /** Extends the base parameter map with this class's extra `delimiter` field. */
  override def toMap: Map[String, Any] =
    super.toMap + ("delimiter" -> delimiter)
}


