package com.feidee.fd.sml.algorithm.component

import com.feidee.fd.sml.algorithm.forecast.{Forecast, MleapForecast, PMMLForecast, SparkForecast}
import com.feidee.fdspark.transformer.ModelType
import org.apache.spark.sql.Column

/**
  * @Author songhaicheng
  * @Date 2018/10/19 14:09
  * @Description Offline bulk (batch) prediction component.
  * @Reviewer
  */
class BulkForecast extends AbstractComponent[BulkForecastParam] {

  // NOTE(review): the entire prediction implementation below is commented out, so this
  // class currently adds no behavior of its own beyond whatever AbstractComponent
  // provides — confirm whether it was intentionally disabled or should be restored.
  //
  // The disabled flow was: parse & verify params -> load input data -> load model(s)
  // by type (Spark / PMML / MLeap) -> predict -> shape the output columns per model
  // type -> drop and re-create the target Hive table with the results.

//  override def apply(paramStr: String): Unit = {
//    val param = parseParam(paramStr)
//    param.verify()
//    val inputData = loadData(param)
//    // load the model(s)
//    val forecast = param.modelType match {
//      case Forecast.MODEL_TYPE_SPARK =>
//        new SparkForecast(spark,null, param.modelPaths: _*)
//      case Forecast.MODEL_TYPE_PMML =>
//        new PMMLForecast(spark,null, param.modelPaths: _*)
//      case Forecast.MODEL_TYPE_MLEAP=>
//        new MleapForecast( 30,null, param.modelPaths: _*)
//    }
//
//    val prediction = forecast.predict(inputData)
//
//    // compute the bulk-prediction output according to the model format
//    val outputData = param.modelType match {
//      case Forecast.MODEL_TYPE_SPARK =>
//        // the type of the last model decides what to output
//        val lastMeta = tool.getMetaFromModel(forecast.asInstanceOf[SparkForecast].models.last)
//        lastMeta.getModelType match {
//          case ModelType.Feature =>
//            // feature engineering appends outputCol to the original schema
//            val outputCol = lastMeta.getParameters("outputCol").toString
//            val cols = inputData.schema.fieldNames ++ Array(outputCol)
//            prediction.select(cols.map(new Column(_)): _*)
//          case ModelType.Algorithm_Classification_Prob =>
//            // classification with probabilities returns the original columns plus a
//            // target prediction column of (label:probability) values
//            val label2ProbabilityCol = tool.renameDuplicatedColName(
//              Forecast.LABEL2PROBABILITY_COL,
//              inputData.schema.fieldNames)
//            val cols = inputData.schema.fieldNames ++ Array(label2ProbabilityCol)
//            val res = forecast.computeProbability(prediction, param.topn)
//            // intersect the final result with the input columns, so that columns present
//            // in the training data but absent from the data being predicted are not
//            // selected (selecting a non-existent column would raise an error)
//            res.select(res.schema.fieldNames.intersect(cols).map(new Column(_)): _*)
//          case _ =>
//            prediction
//        }
//      case Forecast.MODEL_TYPE_PMML =>
//        // return only the original columns plus the prediction column (label:probability)
//        val label2ProbabilityCol = tool.renameDuplicatedColName(
//          Forecast.LABEL2PROBABILITY_COL,
//          inputData.schema.fieldNames)
//        val cols = inputData.schema.fieldNames ++ Array(label2ProbabilityCol)
//        forecast.computeProbability(prediction, param.topn).select(cols.map(new Column(_)): _*)
//      case Forecast.MODEL_TYPE_MLEAP=>
//        val label2ProbabilityCol = tool.renameDuplicatedColName(
//          Forecast.LABEL2PROBABILITY_COL,
//          inputData.schema.fieldNames)
//        val cols = inputData.schema.fieldNames ++ Array(label2ProbabilityCol)
//        forecast.computeProbability(prediction, param.topn).select(cols.map(new Column(_)): _*)
//    }
//
//    // save the results to the Hive table, replacing any existing table
//    outputData.sqlContext.sql(s"DROP TABLE IF EXISTS ${param.hive_table}")
//    outputData.write.saveAsTable(param.hive_table)
//  }

}

object BulkForecast {

  /**
    * Convenience entry point: constructs a [[BulkForecast]] component and invokes it
    * with the given serialized parameter string.
    */
  def apply(paramStr: String): Unit = {
    val component = new BulkForecast()
    component(paramStr)
  }

  /**
    * CLI entry point. Expects the serialized parameter string as the first argument
    * (throws if no argument is supplied, matching the component's fail-fast behavior).
    */
  def main(args: Array[String]): Unit = {
    BulkForecast(args(0))
  }

}

/**
  * Parameter holder for [[BulkForecast]].
  *
  * Mutable `var` fields and the zero-arg constructor exist so the object can be
  * populated by reflective / JSON-based parameter parsing.
  */
class BulkForecastParam(
                         override val input_pt: String,
                         override val output_pt: String,
                         override val hive_table: String,
                         override val flow_time: String,
                         // Paths of the model(s) to load
                         var modelPaths: Array[String],
                         // Model format; one of Forecast.SUPPORTED_MODEL_TYPES (spark, pmml, mleap, ...), defaults to spark
                         var modelType: String,
                         // Number of prediction entries to return (ranked by probability); 0 returns all, defaults to 0
                         var topn: Int
                       ) extends BasicParam {

  // Defaults used when the object is built via the no-arg constructor (e.g. by a parser).
  def this() = this(null, null, null, null, Array.empty, Forecast.MODEL_TYPE_SPARK, 0)

  /**
    * Validates the parameters, failing fast with a descriptive message.
    *
    * @throws IllegalArgumentException if any required parameter is missing or invalid
    */
  override def verify(): Unit = {
    super.verify()
    require(tool.isNotNull(input_pt), "param input_pt can't be null")
    require(tool.isNotNull(hive_table), "param hive_table can't be null")
    // Guard against a null array (possible after deserialization) before checking emptiness,
    // so the caller gets this message instead of a bare NullPointerException.
    require(modelPaths != null && modelPaths.nonEmpty, "param modelPaths' length must be greater than 0")
    // Check for a null modelType explicitly so a missing value fails with the clear
    // message below rather than an NPE from calling toLowerCase on null.
    require(modelType != null && Forecast.SUPPORTED_MODEL_TYPES.contains(modelType.toLowerCase),
      s"param modelType only accepts [${Forecast.SUPPORTED_MODEL_TYPES.mkString(", ")}], but has $modelType")
  }

}
