package com.feidee.fd.sml.algorithm.component.feature

import org.apache.spark.ml.PipelineStage
import org.apache.spark.ml.feature.PCA
import org.apache.spark.sql.DataFrame

/**
  * @Author songhaicheng
  * @Date 2019/3/25 18:58
  * @Description
  * @Reviewer dongguosheng
  */
/**
  * Parameter holder for [[PCAEncoder]]. Inherits the common I/O / column
  * parameters from `FeatureParam` and adds the PCA-specific dimension `k`.
  */
case class PCAEncoderParam(
                            override val input_pt: String,
                            override val output_pt: String,
                            override val hive_table: String,
                            override val flow_time: String,
                            override val inputCol: String,
                            override val outputCol: String,
                            override val preserveCols: String,
                            override val modelPath: String,
                            // Number of principal components to keep; must be > 0.
                            k: Int
                          ) extends FeatureParam {

  // Zero-arg constructor (presumably for reflective / framework instantiation —
  // confirm against AbstractFeatureEncoder). `Int.MinValue` is an invalid
  // sentinel for k so that verify() fails if k was never supplied.
  def this() = this(null, null, null, null, "input", "features", null, null, Int.MinValue)

  /** Validates the inherited parameters, then enforces k > 0. */
  override def verify(): Unit = {
    super.verify()
    require(k > 0, s"param k must be greater than 0, instead of $k")
  }

  /** Parent parameter map extended with k. */
  override def toMap: Map[String, Any] = super.toMap + ("k" -> k)
}


/**
  * Feature encoder that contributes a single Spark ML [[PCA]] stage,
  * projecting `param.inputCol` onto its top `param.k` principal components
  * written to `param.outputCol`.
  */
class PCAEncoder extends AbstractFeatureEncoder[PCAEncoderParam] {

  /**
    * Builds the pipeline stages for this encoder.
    *
    * @param param validated PCA parameters
    * @param data  input frame (not inspected here; PCA is configured purely from param)
    * @return the single configured PCA stage
    */
  override def setUp(param: PCAEncoderParam, data: DataFrame): Array[PipelineStage] = {
    val pcaStage = new PCA()
      .setInputCol(param.inputCol)
      .setOutputCol(param.outputCol)
      .setK(param.k)
    Array(pcaStage)
  }
}

/**
  * Command-line entry point for the PCA encoder.
  */
object PCAEncoder {

  /**
    * Runs a fresh [[PCAEncoder]] against the serialized parameter string
    * (format defined by AbstractFeatureEncoder's apply — confirm upstream).
    */
  def apply(paramStr: String): Unit = {
    new PCAEncoder()(paramStr)
  }

  def main(args: Array[String]): Unit = {
    // Fail fast with a clear message instead of an opaque
    // ArrayIndexOutOfBoundsException when no argument is supplied.
    require(args.nonEmpty, "usage: PCAEncoder <paramStr> — a parameter string argument is required")
    PCAEncoder(args(0))
  }
}
