package com.feidee.fdspark.transformer

import org.apache.spark.ml.Transformer
import org.apache.spark.ml.param.{Param, ParamMap, StringArrayParam}
import org.apache.spark.ml.util.{DefaultParamsReadable, DefaultParamsWritable, Identifiable}
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, functions}

/**
  * @Author songhaicheng
  * @Date 2018/10/15 19:13
  * @Description Joins multiple columns into one string column using a delimiter.
  * @Reviewer
  */
/**
  * A [[Transformer]] that concatenates the string representations of several
  * input columns into a single output column, separated by a delimiter.
  *
  * Null values are skipped by Spark's `concat_ws` semantics.
  */
class ColAssembler(override val uid: String) extends Transformer with DefaultParamsWritable {
  /** Names of the input columns to assemble, in order. */
  final val cols = new StringArrayParam(this, "cols", "cols to be assembled")
  /** Separator placed between the values of consecutive columns. */
  final val delimiter = new Param[String](this, "delimiter", "delimiter")
  /** Name of the generated output column. */
  final val outputCol = new Param[String](this, "outputCol", "result col name")

  def setCols(value: Array[String]): this.type = set(cols, value)
  def getCols: Array[String] = $(cols)
  def setDelimiter(value: String): this.type = set(delimiter, value)
  def getDelimiter: String = $(delimiter)
  def setOutputCol(value: String): this.type = set(outputCol, value)
  def getOutputCol: String = $(outputCol)

  // Default: assemble no columns (transformSchema rejects this, forcing callers to set cols).
  setDefault(cols, new Array[String](0))
  // Default: join with a single space.
  setDefault(delimiter, " ")
  // Default: store the result under the column name "features".
  setDefault(outputCol, "features")

  def this() = this(Identifiable.randomUID("colassembler"))

  /**
    * Appends the assembled column to the dataset.
    *
    * @param dataset input dataset; must contain every column listed in `cols`
    * @return the input dataset with one additional string column `outputCol`
    */
  override def transform(dataset: Dataset[_]): DataFrame = {
    // Validate params/schema up front so misconfiguration fails with the
    // clear `require` messages from transformSchema rather than an opaque
    // analysis exception from Spark.
    transformSchema(dataset.schema)
    dataset.withColumn($(outputCol), functions.concat_ws($(delimiter), $(cols).map(dataset.col): _*))
  }

  // Narrowed return type (covariant override) so callers keep the concrete type.
  override def copy(extra: ParamMap): ColAssembler = {
    defaultCopy(extra)
  }

  override def transformSchema(schema: StructType): StructType = {
    // At least one input column must be configured.
    require($(cols).length > 0, "cols' length must be greater than 0")
    // Every input column must exist in the incoming schema.
    $(cols).foreach(col => require(schema.fieldNames.contains(col), s"item of cols must be existed (false item: $col)"))
    // The output column must not clash with an existing field.
    require(!schema.fieldNames.contains($(outputCol)),
      s"output column ${$(outputCol)} already exists in the schema")
    schema.add(StructField($(outputCol), StringType))
  }
}

/** Companion providing `read`/`load` for persisted [[ColAssembler]] instances.
  * Bug fix: the type parameter was `ColSelector` (copy-paste error), which made
  * `ColAssembler.load(...)` return the wrong transformer type. */
object ColAssembler extends DefaultParamsReadable[ColAssembler] {}
