package com.feidee.fd.sml.algorithm.component.feature

import org.apache.spark.ml.PipelineStage
import org.apache.spark.ml.feature.Word2Vec
import org.apache.spark.sql.DataFrame

/**
  * @Author songhaicheng
  * @Date 2018/10/11 16:33
  * @Description Word2Vec feature-encoding component
  * @Reviewer dongguosheng
  */
/**
  * Parameter container for the Word2Vec feature-encoding component.
  *
  * Inherits the common I/O and column parameters from [[FeatureParam]] and adds
  * the Word2Vec-specific hyperparameters. `verify()` fails fast with an
  * `IllegalArgumentException` when any value is out of range.
  */
case class Word2VecEncoderParam(
                                 override val input_pt: String,
                                 override val output_pt: String,
                                 override val hive_table: String,
                                 override val flow_time: String,
                                 override val inputCol: String,
                                 override val outputCol: String,
                                 override val preserveCols: String,
                                 override val modelPath: String,
                                 // Minimum number of occurrences a token needs to be kept in the
                                 // vocabulary; lower-frequency tokens are filtered out of training.
                                 // Default 5, must be >= 0.
                                 minCount: Int,
                                 // Dimension of the learned word vectors. Default 100, must be > 0.
                                 vectorSize: Int,
                                 // Maximum number of training iterations. Default 1, must be >= 0.
                                 maxIter: Int,
                                 // Maximum number of tokens per sentence (the feature column).
                                 // Default 1000, must be > 0.
                                 maxSentenceLength: Int,
                                 // Number of partitions for the sentences. Default 1, must be > 0.
                                 numPartitions: Int,
                                 // Random seed. Default 123456.
                                 seed: Long,
                                 // Step size (learning rate) per iteration. Default 0.025, must be > 0.
                                 stepSize: Double,
                                 // Context window size. Default 5, must be > 0.
                                 windowSize: Int
                                 //isConvertToDoc2Vec: Boolean    // whether to also convert to Doc2Vec; default false
                               ) extends FeatureParam {

  /** No-arg constructor providing the documented defaults (used for reflective/JSON population). */
  def this() = this(null, null, null, null, "input", "features", null, null, 5, 100, 1, 1000, 1, 123456L, 0.025, 5)

  /**
    * Validates all hyperparameter ranges on top of the base-class checks.
    *
    * @throws IllegalArgumentException if any parameter is out of its valid range
    */
  override def verify(): Unit = {
    super.verify()
    require(minCount >= 0, "param minCount can't be negative")
    require(vectorSize > 0, "param vectorSize must be greater than 0")
    require(maxIter >= 0, "param maxIter can't be negative")
    require(maxSentenceLength > 0, "param maxSentenceLength must be greater than 0")
    require(numPartitions > 0, "param numPartitions must be greater than 0")
    require(stepSize > 0, "param stepSize must be greater than 0")
    require(windowSize > 0, "param windowSize must be greater than 0")
    // NOTE(review): assumes super.verify() has already rejected a null inputCol,
    // otherwise split would NPE here — confirm against FeatureParam.verify().
    require(!inputCol.split(",").contains(outputCol), "param outputCol conflicts with inputCol")
  }

  /** Serializes all parameters (base plus Word2Vec-specific) into a flat map for logging/persistence. */
  override def toMap: Map[String, Any] = {
    var map = super.toMap
    map += ("minCount" -> minCount)
    map += ("vectorSize" -> vectorSize)
    map += ("maxIter" -> maxIter)
    map += ("maxSentenceLength" -> maxSentenceLength)
    map += ("numPartitions" -> numPartitions)
    map += ("seed" -> seed)
    map += ("stepSize" -> stepSize)
    map += ("windowSize" -> windowSize)
    //map += ("isConvertToDoc2Vec" -> isConvertToDoc2Vec)
    map
  }
}


/**
  * Feature encoder that produces a single Spark ML [[Word2Vec]] pipeline stage
  * configured from a [[Word2VecEncoderParam]].
  */
class Word2VecEncoder extends AbstractFeatureEncoder[Word2VecEncoderParam] {

  /**
    * Builds the pipeline stages for this component.
    *
    * @param param validated Word2Vec hyperparameters
    * @param data  input frame (not consumed at setup time)
    * @return a single-element array holding the configured Word2Vec estimator
    */
  override def setUp(param: Word2VecEncoderParam, data: DataFrame): Array[PipelineStage] = {
    val estimator = new Word2Vec()
    // Each setter mutates and returns the same estimator instance.
    estimator.setInputCol(param.inputCol)
    estimator.setOutputCol(param.outputCol)
    estimator.setMinCount(param.minCount)
    estimator.setVectorSize(param.vectorSize)
    estimator.setMaxIter(param.maxIter)
    estimator.setMaxSentenceLength(param.maxSentenceLength)
    estimator.setNumPartitions(param.numPartitions)
    estimator.setSeed(param.seed)
    estimator.setStepSize(param.stepSize)
    estimator.setWindowSize(param.windowSize)
    Array(estimator)
  }

}

/** Entry point for running the Word2Vec encoder as a standalone job. */
object Word2VecEncoder {

  /** Instantiates an encoder and runs it against the given serialized parameter string. */
  def apply(paramStr: String): Unit = {
    val encoder = new Word2VecEncoder()
    encoder(paramStr)
  }

  /** Job entry point; `args(0)` carries the serialized component parameters. */
  def main(args: Array[String]): Unit = Word2VecEncoder(args(0))
}
