package com.feidee.fd.sml.algorithm.component.clustering

import com.feidee.fd.sml.algorithm.component.{AbstractComponent, BasicParam}
import com.feidee.fdspark.transformer.{MetaStorage, ModelType}
import org.apache.spark.ml.clustering.LDA
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.sql.DataFrame

/**
  * @Author songhaicheng
  * @Date 2019/1/10 15:36
  * @Description
  * @Reviewer
  */
/**
  * Parameter bundle for the LDA clustering component.
  *
  * Extends the shared [[BasicParam]] I/O fields (input/output paths, hive table,
  * flow time) with every tunable exposed by Spark ML's LDA estimator.
  */
case class LDAParam(
                     override val input_pt: String,
                     override val output_pt: String,
                     override val hive_table: String,
                     override val flow_time: String,
                     // HDFS path where the trained model is persisted
                     modelPath: String,
                     // input feature column, defaults to "features"
                     featuresCol: String,
                     // output column holding each document's topic mixture, defaults to "topicDistribution"
                     topicDistributionCol: String,
                     // optimization algorithm, one of [em, online]; defaults to "online"
                     optimizer: String,
                     // random seed, defaults to 123456
                     seed: Long,
                     // number of topics, must be > 1; defaults to 10
                     topicNum: Int,
                     // maximum iterations, must be >= 0; defaults to 20
                     maxIter: Int,
                     // checkpoint interval; with a large maxIter, checkpointing shrinks shuffle
                     // files and aids failure recovery; defaults to 10
                     checkpointInterval: Int,
                     // exponential learning-rate decay, > 0, online optimizer only; defaults to 0.51.
                     // Values in (0.5, 1.0] guarantee asymptotic convergence of the online algorithm
                     learningDecay: Double,
                     // learning offset, > 0; downweights early training batches; defaults to 1024
                     learningOffset: Double,
                     // mini-batch sampling rate in (0, 1]; defaults to 0.05
                     subsamplingRate: Double,
                     // prior on topics' distributions over terms; em requires > 1.0, online requires
                     // >= 0; default is "auto" (left unset)
                     topicConcentration: Double,
                     // prior on documents' distributions over topics; em requires all values equal
                     // and > 1.0, online requires all values >= 0; default is "auto" (left unset)
                     docConcentration: Array[Double],
                     // whether to keep the last Spark checkpoint for fault tolerance;
                     // em optimizer only; defaults to true
                     keepLastCheckpoint: Boolean,
                     // whether to optimize docConcentration during training; defaults to true
                     optimizeDocConcentration: Boolean
                   ) extends BasicParam {

  /** Zero-arg constructor supplying the documented defaults. */
  def this() = this(null, null, null, null, null, "features", "topicDistribution", "online", 123456, 10, 20, 10, 0.51,
    1024, 0.05, 0, Array.empty[Double], true, true)

  /**
    * Validates the parameters, throwing IllegalArgumentException on the first
    * violation. The optimizer check is case-insensitive.
    */
  override def verify(): Unit = {
    super.verify()
    val supported = Array("em", "online")
    require(tool.isNotNull(input_pt), "param input_pt can't be null")
    require(supported.contains(optimizer.toLowerCase), s"param optimizer only accepts [${supported.mkString(", ")}]," +
      s" but has $optimizer")
    require(topicNum > 1, "param topicNum must be greater than 1")
    require(maxIter >= 0, "param maxIter can't be negative")
    require(checkpointInterval == -1 || checkpointInterval >= 1, "param checkpointInterval must be" +
      " equals to -1 or not less than 1")
    require(learningDecay > 0, "param learningDecay must be greater than 0")
    require(learningOffset > 0, "param learningOffset must be greater than 0")
    require(subsamplingRate > 0 && subsamplingRate <= 1, "param subsamplingRate's range is (0, 1]")
  }

  /** Flattens all parameters (including the inherited ones) into a single map. */
  override def toMap: Map[String, Any] = super.toMap ++ Map(
    "modelPath" -> modelPath,
    "featuresCol" -> featuresCol,
    "topicDistributionCol" -> topicDistributionCol,
    "optimizer" -> optimizer,
    "seed" -> seed,
    "topicNum" -> topicNum,
    "maxIter" -> maxIter,
    "checkpointInterval" -> checkpointInterval,
    "learningDecay" -> learningDecay,
    "learningOffset" -> learningOffset,
    "subsamplingRate" -> subsamplingRate,
    "topicConcentration" -> topicConcentration,
    "docConcentration" -> docConcentration,
    "keepLastCheckpoint" -> keepLastCheckpoint,
    "optimizeDocConcentration" -> optimizeDocConcentration
  )
}


/**
  * Clustering component wrapping Spark ML's LDA estimator.
  *
  * Parses an [[LDAParam]], trains an LDA pipeline (with parameter metadata
  * stored alongside the model), transforms the input data and persists the
  * result/model/table according to which output targets are configured.
  */
class LDAComponent extends AbstractComponent[LDAParam] {

  /**
    * Builds and fits a pipeline of [metadata stage, LDA stage] on `data`.
    *
    * @param param validated component parameters
    * @param data  input DataFrame containing `param.featuresCol`
    * @return the fitted PipelineModel
    */
  def train(param: LDAParam, data: DataFrame): PipelineModel = {
    // Persist the training parameters/schema as model metadata.
    val meta = new MetaStorage()
      .setModelType(ModelType.Clustering)
      .setParameters(param.toMap)
      .setFields(data.schema.fieldNames)

    val lda = new LDA()
      .setFeaturesCol(param.featuresCol)
      .setTopicDistributionCol(param.topicDistributionCol)
      .setOptimizer(param.optimizer)
      .setSeed(param.seed)
      .setK(param.topicNum)
      .setMaxIter(param.maxIter)
      .setCheckpointInterval(param.checkpointInterval)
      .setLearningDecay(param.learningDecay)
      .setLearningOffset(param.learningOffset)
      .setSubsamplingRate(param.subsamplingRate)
      .setKeepLastCheckpoint(param.keepLastCheckpoint)
      .setOptimizeDocConcentration(param.optimizeDocConcentration)

    // verify() accepts the optimizer case-insensitively (e.g. "EM"), so the
    // branches below must compare case-insensitively as well; the original
    // case-sensitive equals silently skipped setTopicConcentration for
    // upper-cased but otherwise valid optimizer values.
    val optimizer = param.optimizer.toLowerCase
    // Only pass topicConcentration when it is legal for the chosen optimizer;
    // otherwise leave it unset so Spark picks its automatic default.
    if ("em".equals(optimizer) && param.topicConcentration > 1.0) {
      lda.setTopicConcentration(param.topicConcentration)
    } else if ("online".equals(optimizer) && param.topicConcentration >= 0.0) {
      lda.setTopicConcentration(param.topicConcentration)
    }

    // An empty array means "automatic" — only set an explicit prior when given.
    if (param.docConcentration.length > 0) {
      lda.setDocConcentration(param.docConcentration)
    }

    val pipeline = new Pipeline()
      .setStages(Array(meta, lda))
    pipeline.fit(data)
  }

  /**
    * Saves the model to `param.modelPath`, plus a timestamped backup copy when
    * `flow_time` is available.
    */
  def outputModel(model: PipelineModel, param: LDAParam): Unit = {
    model.write.overwrite().save(param.modelPath)
    if (tool.isNotNull(param.flow_time)) {
      model.write.overwrite().save(s"${param.modelPath}_${param.flow_time}")
    } else {
      logWarning("未发现运行时间参数，不做模型备份处理")
    }
  }

  /**
    * Full component run: parse → verify → load → train → transform → save.
    * Each output target (path / model / hive table) is written only when its
    * parameter is set.
    */
  override def apply(paramStr: String): Unit = {
    logInfo("parsing parameter")
    val param = parseParam(paramStr)
    logInfo("validating parameter")
    param.verify()
    logInfo(s"loading input data FROM ${param.input_pt}")
    val inputData = loadData(param)
    logInfo("training LDA model")
    val model = train(param, inputData)
    val result = model.transform(inputData)
    // Persist whichever outputs were requested. (Messages previously said
    // "feature result/model" — a copy-paste from a feature component.)
    if (tool.isNotNull(param.output_pt)) {
      logInfo(s"saving LDA result TO ${param.output_pt}")
      outputData(result, param)
    }
    if (tool.isNotNull(param.modelPath)) {
      logInfo(s"saving LDA model TO ${param.modelPath}")
      outputModel(model, param)
    }
    if (tool.isNotNull(param.hive_table)) {
      logInfo(s"saving to hive table ${param.hive_table}")
      outputTable(result, param)
    }
  }

}

/** Command-line entry point for [[LDAComponent]]. */
object LDAComponent {

  /** Runs a fresh component instance against the given parameter string. */
  def apply(paramStr: String): Unit = {
    val component = new LDAComponent()
    component(paramStr)
  }

  /** Expects the serialized parameters as the first program argument. */
  def main(args: Array[String]): Unit = LDAComponent(args(0))

}
