package com.feidee.fdspark.transformer.ps

import com.tencent.angel.sona.ml.Transformer
import com.tencent.angel.sona.ml.param.{ParamMap, StringArrayParam}
import com.tencent.angel.sona.ml.util.{DefaultParamsReadable, DefaultParamsWritable, Identifiable}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, Dataset}

/**
  * Transformer that removes the specified columns from a DataFrame.
  *
  * @note Every custom Transformer must extend DefaultParamsWritable, otherwise a
  *       model containing this stage cannot be saved; its companion object must
  *       extend DefaultParamsReadable, otherwise a saved model cannot be loaded.
  * @author songhaicheng (haicheng_song@sui.com), 2018/09/10
  */
class PSColEliminator(override val uid: String) extends Transformer with DefaultParamsWritable {

  /** Names of the columns to be dropped from the input dataset. */
  final val drops = new StringArrayParam(this, "drops", "column names that will be dropped")

  def setDrops(value: Array[String]): this.type = set(drops, value)
  def getDrops: Array[String] = $(drops)

  // By default no columns are dropped.
  setDefault(drops, Array.empty[String])

  def this() = this(Identifiable.randomUID("ps_coleliminator"))

  /**
    * Drops the configured columns from the dataset.
    *
    * The input schema is validated first: `Dataset.drop` silently ignores
    * unknown column names, so without this check a misconfigured `drops` value
    * would pass unnoticed at runtime even though `transformSchema` rejects it.
    */
  override def transform(dataset: Dataset[_]): DataFrame = {
    // Fail fast (and consistently with transformSchema) if any column is missing.
    transformSchema(dataset.schema)
    dataset.drop($(drops): _*)
  }

  override def copy(extra: ParamMap): Transformer = defaultCopy(extra)

  /**
    * Returns the input schema with the dropped columns removed.
    * Requires every column listed in `drops` to exist in the input schema.
    */
  override def transformSchema(schema: StructType): StructType = {
    $(drops).foreach { col =>
      require(schema.fieldNames.contains(col),
        s"column '$col' listed in drops does not exist in the input schema")
    }
    StructType(schema.filterNot(field => $(drops).contains(field.name)))
  }

}
object PSColEliminator extends DefaultParamsReadable[PSColEliminator] {}


