package com.feidee.fd.sml.algorithm.component.preprocess

import com.feidee.fd.sml.algorithm.component.{AbstractComponent, BasicParam}
import com.feidee.fdspark.transformer.{ColSelector, MetaStorage, ModelType}
import org.apache.spark.ml.{Pipeline, PipelineModel, PipelineStage}
import org.apache.spark.sql.DataFrame

/**
  * @Author: xiongjun
  * @Date: 2019/4/24 15:32
  */
abstract class AbstractPreprocessor[A <: PreprocessorParam](implicit m: Manifest[A]) extends AbstractComponent[A] {
  /**
    * Builds the feature-processing pipeline stages for this component from
    * the supplied parameters and input data. Concrete subclasses override
    * this; the default implementation is unimplemented.
    *
    * @param param component parameters
    * @param data  input data
    * @return pipeline stages implementing this component's transformation
    */
  def setUp(param: A, data: DataFrame): Array[PipelineStage] = ???

  /**
    * Fits the full preprocessing pipeline on the input data.
    *
    * Pipeline order: metadata storage -> component stages -> column selection.
    *
    * @param param component parameters
    * @param data  training data
    * @return the fitted pipeline model
    */
  def train(param: A, data: DataFrame): PipelineModel = {
    // Columns to carry through unchanged. When preserveCols is absent we must
    // use an empty array: splitting an empty/null string would otherwise
    // produce a one-element array containing "".
    val preservedColumns =
      if (tool.isNotNull(param.preserveCols)) param.preserveCols.split(",")
      else Array.empty[String]

    // Final result columns = preserved columns plus declared output columns.
    val resultColumns = preservedColumns ++ param.outputCol.split(",")

    // Stage recording training metadata: model type, parameters, input schema.
    val metaStage = new MetaStorage()
      .setModelType(ModelType.Preprocessing)
      .setParameters(param.toMap)
      .setFields(data.schema.fieldNames)

    // Stage that restricts the final output to the result columns only.
    val selectorStage = new ColSelector().setCols(resultColumns)

    val stages = Array(metaStage) ++ setUp(param, data) ++ Array(selectorStage)
    new Pipeline().setStages(stages).fit(data)
  }

  /**
    * Persists the model. Besides saving to the configured path, also writes a
    * copy suffixed with the flow time under the same path, serving as a backup
    * and for online use. Skipped (with a warning) when flow_time is absent.
    *
    * @param model the model to save
    * @param param parameters (provide modelPath and flow_time)
    */
  def outputModel(model: PipelineModel, param: A): Unit = {
    model.write.overwrite().save(param.modelPath)
    if (!tool.isNotNull(param.flow_time)) {
      logWarning("未发现运行时间参数，不做模型备份处理")
    } else {
      // Time-suffixed backup copy alongside the primary model path.
      model.write.overwrite().save(s"${param.modelPath}_${param.flow_time}")
    }
  }

  /**
    * Entry point: parse and validate parameters, load data, fit the model,
    * transform the input, then conditionally save the result data, the model,
    * and the Hive table, depending on which output parameters are set.
    *
    * @param paramStr raw parameter string
    */
  override def apply(paramStr: String): Unit = {
    logInfo("parsing parameter")
    val parsed = parseParam(paramStr)
    logInfo("validating parameter")
    parsed.verify()

    logInfo(s"loading input data FROM ${parsed.input_pt}")
    val input = loadData(parsed)

    logInfo("training feature model")
    val fitted = train(parsed, input)
    val transformed = fitted.transform(input)

    // Each output target is optional and driven by its own parameter.
    if (tool.isNotNull(parsed.output_pt)) {
      logInfo(s"saving feature result TO ${parsed.output_pt}")
      outputData(transformed, parsed)
    }
    if (tool.isNotNull(parsed.modelPath)) {
      logInfo(s"saving feature model TO ${parsed.modelPath}")
      outputModel(fitted, parsed)
    }
    if (tool.isNotNull(parsed.hive_table)) {
      logInfo(s"saving to hive table ${parsed.hive_table}")
      outputTable(transformed, parsed)
    }
  }
}

trait PreprocessorParam extends BasicParam {
  override val input_pt: String
  override val output_pt: String
  override val hive_table: String
  override val flow_time: String

  // Comma-separated input column names (required).
  val inputCol: String
  // Comma-separated output column names (required).
  val outputCol: String
  // Path to persist the fitted model (optional).
  val modelPath: String
  // Comma-separated columns to carry through to the result (optional).
  val preserveCols: String

  /**
    * Validates the parameters; throws IllegalArgumentException on failure.
    * Checks that required fields are present and that inputCol / preserveCols
    * do not overlap with outputCol.
    */
  override def verify(): Unit = {
    super.verify()
    require(tool.isNotNull(input_pt), "param input_pt can't be null")
    require(tool.isNotNull(inputCol), "输入字段 inputCol 不能为空")
    require(tool.isNotNull(outputCol), "输出字段 outputCol 不能为空")
    val inputCols = inputCol.split(",")
    val outputCols = outputCol.split(",")
    require(inputCols.intersect(outputCols).isEmpty,
      "输入字段 inputCol 不能与输出字段 outputCol 出现重合")
    // BUG FIX: the raw `preserveCols` String used to be intersected directly
    // with Array[String]; a String is a Seq[Char], so Char never equaled
    // String and the intersection was always empty, making this check a
    // silent no-op. Split into column names first so the comparison is
    // string-to-string, mirroring the inputCol check above.
    require(!tool.isNotNull(preserveCols) ||
        preserveCols.split(",").intersect(outputCols).isEmpty,
      "保留字段 preserveCols 不能与输出字段 outputCol 出现重合")
  }

  /** Extends the base parameter map with this trait's fields. */
  override def toMap: Map[String, Any] = {
    var map = super.toMap
    map += ("inputCol" -> inputCol)
    map += ("outputCol" -> outputCol)
    map += ("preserveCols" -> preserveCols)
    map += ("modelPath" -> modelPath)
    map
  }

}
