package com.feidee.fd.sml.algorithm.forecast

import java.io.File
import java.{lang, util}

import com.feidee.fdhadoop.hdfs.HdfsUtils
import com.feidee.fdspark.transformer.FieldInfo
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.{Column, DataFrame, SparkSession, functions}
import org.dmg.pmml.{FieldName, Model}
import org.jpmml.evaluator.spark.TransformerBuilder
import org.jpmml.evaluator.{FieldValue, InputField, LoadingModelEvaluatorBuilder, ModelEvaluator}

import scala.collection.JavaConversions
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer

/**
  * @Author: xiongjun
  * @Date: 2019/4/28 8:56
  */
/**
  * Forecast implementation backed by a PMML model file stored on HDFS.
  *
  * The model is copied from HDFS to a local temp file, loaded and verified with the
  * JPMML evaluator, and then used either to score Spark DataFrames
  * ([[predictWithPMML]]) or plain in-memory maps ([[predictProbabilitiesWithPMML]]).
  *
  * @param spark     active SparkSession (inherited from Forecast)
  * @param fieldInfo optional per-field metadata, field name -> Array(type, ..., default);
  *                  the first element is matched against FieldInfo type constants and the
  *                  last element is used as the default value. When non-empty it must
  *                  cover every input field of the model.
  * @param paths     HDFS model path(s); only `paths.head` is actually loaded
  */
class PMMLForecast(override val spark: SparkSession, val fieldInfo: Map[String, Array[String]], override val paths: String*) extends Forecast[ModelEvaluator[_ <: Model]] {

  // The single loaded PMML evaluator; load() returns a one-element array and we keep its head.
  var _evaluator: ModelEvaluator[_ <: Model] = load().head

  // Whether the field-info map should be used to preprocess/convert input features.
  val _isFieldInfoExists: Boolean = fieldInfo != null && fieldInfo.nonEmpty

  /**
    * Loads the PMML model from HDFS.
    *
    * Copies `paths.head` to a uniquely-named local temp file, builds and verifies a
    * JPMML ModelEvaluator from it, then best-effort deletes the temp file (a failed
    * delete is only logged, not fatal).
    *
    * @return a single-element array holding the verified evaluator
    */
  override def load(): Array[ModelEvaluator[_ <: Model]] = {
    val startTime = System.currentTimeMillis()
    // Copy the model file from HDFS to a local temp file
    // (timestamp suffix avoids collisions between concurrent loads of the same model).
    val localTmpFile = new File(s"${System.getProperty("java.io.tmpdir")}/${tool.getFileName(paths.head)}_${System.currentTimeMillis}")
    HdfsUtils.get(paths.head, localTmpFile.getPath)
    // Load and verify the model (verify() runs the PMML embedded verification data, if any).
    val evaluator = new LoadingModelEvaluatorBuilder().load(localTmpFile).build()
    evaluator.verify()
    // Delete the temp file; failure is non-fatal, just logged.
    if (!localTmpFile.delete()) {
      log.warn(s"在加载 PMML 模型后，删除临时文件失败: ${localTmpFile.getPath}")
    }

    logInfo(s"加载模型（PMML）耗时: ${System.currentTimeMillis() - startTime}ms")
    Array(evaluator)

  }


  /**
    * Scores a DataFrame with the loaded PMML model.
    *
    * Missing model input columns are auto-filled with null literals; at least one
    * input column of the model must already exist in the DataFrame, otherwise an
    * IllegalArgumentException is thrown via require.
    *
    * @param dataFrame input data to score
    * @return the input DataFrame extended with the model's target and output columns
    */
  private[algorithm] def predictWithPMML(dataFrame: DataFrame): DataFrame = {
    val startTime = System.currentTimeMillis()
    val inputFields = dataFrame.schema.fieldNames
    var data = dataFrame
    // At least one column of the incoming dataFrame must appear among the model's training fields.
    var isValid = false
    // Check for required input columns that are missing and auto-fill them with null.
    for (input <- _evaluator.getInputFields.toArray) {
      val inputFieldName = input.asInstanceOf[InputField].getName.getValue
      if (!inputFields.contains(inputFieldName)) {
        data = data.withColumn(inputFieldName, functions.lit(null))
      } else {
        isValid = true
      }
    }
    require(isValid, s"待预测的特征列[${inputFields.mkString(",")}]未出现在训练数据中[" +
      s"${_evaluator.getInputFields.toArray.map(f => f.asInstanceOf[InputField].getName.getValue).mkString(",")}]")
    // exploded(true) flattens the evaluator's result struct into top-level columns.
    val res = new TransformerBuilder(_evaluator)
      .withTargetCols()
      .withOutputCols()
      .exploded(true)
      .build()
      .transform(data)

    logInfo(s"PMML 模型预测耗时: ${System.currentTimeMillis() - startTime}ms")

    res
  }

  /**
    * Returns the top-n predicted labels with their probabilities.
    *
    * Collects all `probability(xxx)` columns of the prediction DataFrame, concatenates
    * them into one string column, sorts label:probability pairs by probability
    * descending inside a UDF, takes the top-n (all when topn <= 0), and drops the
    * individual probability columns.
    *
    * @param prediction already-scored DataFrame containing probability(xxx) columns
    * @param topn       number of labels to keep; <= 0 keeps all
    * @return DataFrame with a single combined "label:prob, label:prob, ..." column
    */
  override def computeProbability(prediction: DataFrame, topn: Int = 0): DataFrame = {
    val probNames = new ArrayBuffer[String]()
    // Extract the label name out of every "probability(<label>)" column via regex.
    prediction.schema.map(field => {
      val fName = tool.extractDataByReg(Forecast.PROBABILITY_REG, field.name, 1)
      if (fName.length > 0) {
        probNames += fName
      }
    })
    val names = spark.sparkContext.broadcast(probNames)
    // Parse the probability values and sort them descending.
    // NOTE(review): probArr(i) is matched to names.value(i) by index — this relies on
    // concat_ws below using the same probNames ordering, and on every probability
    // column being non-null (a null would shift split(",") positions) — confirm.
    val sortByProb = udf { probVals: String => {
      var map = Map[String, Double]()
      val probArr = probVals.split(",")
      for (i <- probArr.indices) {
        map += (names.value(i) -> probArr(i).toDouble)
      }
      var resSeq = map.toSeq.sortWith(_._2 > _._2).map(kv => s"${kv._1}:${kv._2}")
      resSeq = if (topn > 0) {
        resSeq.take(topn)
      } else {
        resSeq
      }
      resSeq.mkString(", ")
    }
    }

    // NOTE(review): unpersist() is called before any Spark action has executed the UDF
    // that reads names.value; this appears to rely on lazy re-broadcast semantics —
    // verify the broadcast is still readable when the job actually runs.
    names.unpersist()
    // Assemble the per-label probabilities into a "label:probability, ..." column, then drop
    // the individual probability columns, replacing them with the combined label column.
    val resColName = tool.renameDuplicatedColName(Forecast.LABEL2PROBABILITY_COL, prediction.schema.fieldNames)
    prediction
      .withColumn(resColName, functions.concat_ws(",", probNames.map(n => new Column(s"probability($n)")): _*))
      .withColumn(resColName, sortByProb(new Column(resColName)))
      .drop(probNames.map(n => s"probability($n)"): _*)
  }

  /**
    * Predicts with the loaded model and returns probability maps.
    *
    * @param maps     one map of field name -> value per record
    * @param topn     number of top labels to keep per record
    * @param preModel unused here (present for interface compatibility)
    * @return one label -> probability map per input record
    */
  override def predictProbabilityMaps(maps: Seq[Map[String, Any]], topn: Int, preModel: Array[Forecast[Any]] = null): Array[Map[String, Double]] = {
    predictProbabilitiesWithPMML(maps, topn)
  }

  /**
    * Converts a raw record map into the typed argument map expected by the JPMML evaluator.
    *
    * For every model input field: use the supplied value if present, otherwise the
    * field-info default, otherwise null; then coerce the value according to the
    * field-info type before handing it to InputField.prepare. At least one supplied
    * key must match a model input field, otherwise require throws.
    *
    * @param dataMap raw field name -> value record
    * @return prepared FieldName -> FieldValue arguments for ModelEvaluator.evaluate
    * @throws Exception when fieldInfo is non-empty but lacks one of the model's input fields
    */
  def dataTypeConvert(dataMap: Map[String, Any]): Map[FieldName, FieldValue] = {
    // At least one supplied field must appear among the model's training fields.
    var isValid = false
    // Build up the evaluator argument map.
    var arg = Map[FieldName, FieldValue]()
    for (input <- _evaluator.getInputFields.toArray) {
      val inputField = input.asInstanceOf[InputField]
      val fieldName = inputField.getName.getValue
      if (!isValid && dataMap.isDefinedAt(fieldName)) {
        isValid = true
      }
      // Prefer the supplied value; fall back to the field-info default; otherwise use null.
      var value = if (dataMap.isDefinedAt(fieldName)) {
        dataMap(fieldName)
      } else if (_isFieldInfoExists) {
        if (fieldInfo.isDefinedAt(fieldName)) {
          fieldInfo(fieldName).last
        } else {
          // If field info is supplied it must be complete: one entry per model training field.
          throw new Exception(s"model field info error, because fieldInfo does not contain field [$fieldName]")
        }
      } else {
        null
      }
      // Coerce the value according to the declared field type.
      // NOTE(review): when the value resolved to null above, value.toString in these
      // branches will NPE — confirm callers always supply or default every typed field.
      if (_isFieldInfoExists) {
        value = fieldInfo.get(fieldName) match {
          case Some(Array(FieldInfo.STRING, _)) =>
            value.toString
          case Some(Array(FieldInfo.SEQUENCE, _)) =>
            value.toString
          case Some(Array(FieldInfo.DOUBLE, _)) =>
            value.toString.toDouble
          case Some(Array(FieldInfo.INT, _)) =>
            value.toString.toInt
          case _ =>
            value
        }
      }
      arg += (inputField.getName -> inputField.prepare(value))
    }
    require(isValid, s"待预测的特征列[${dataMap.keySet.mkString(",")}]未出现在训练数据中[" +
      s"${_evaluator.getInputFields.toArray.map(f => f.asInstanceOf[InputField].getName.getValue).mkString(",")}]")
    arg
  }

  /**
    * Predicts with the PMML model and extracts per-label probabilities.
    *
    * Each record is converted via [[dataTypeConvert]], evaluated, and every result
    * entry whose key matches probability(xxx) and whose value is numeric is kept,
    * then sorted, truncated to topn, and rounded to 8 decimal places.
    *
    * @param maps one map of field name -> value per record
    * @param topn number of top labels to keep per record
    * @return one label -> probability map per input record
    */
  private[algorithm] def predictProbabilitiesWithPMML(maps: Seq[Map[String, Any]],
                                                      topn: Int): Array[Map[String, Double]] = {
    val startTime = System.currentTimeMillis()
    val totalRes = maps.map(dataMap => {
      val arg = dataTypeConvert(dataMap)
      // Collect the results.
      var res = Map[String, Double]()
      // NOTE(review): this for-comprehension relies on the deprecated JavaConversions
      // implicits to iterate the evaluator's java.util.Map result — consider JavaConverters.
      for (pred <- _evaluator.evaluate(arg)) {

        // Pull the probability entries out of the result map.
        // The regex keeps only keys of the form probability(xxx); the value must be
        // numeric to count as a valid probability.
        val probVal = tool.extractDataByReg(Forecast.PROBABILITY_REG, pred._1.getValue, 1)
        if (probVal.length > 0 && pred._2.isInstanceOf[Number]) {
          res += (probVal -> pred._2.toString.toDouble)
        }
      }
      // Sort by probability and take the top-n entries.
      res = tool.sortMapValAndTakeTopn(res, topn)
      // Round probabilities to 8 decimal places.
      // NOTE(review): res is re-assigned while being iterated; this works only because
      // foreach iterates the pre-reassignment immutable snapshot — intentional but fragile.
      res.foreach(r => res += (r._1 -> r._2.formatted("%.8f").toDouble))
      res
    }).toArray
    logInfo(s"计算概率（PMML）总耗时: ${System.currentTimeMillis() - startTime}ms")
    totalRes
  }

  /**
    * Predicts real values with a loaded regression model.
    *
    * NOTE(review): not implemented — always returns null, which makes
    * predictionRegJavaList below throw a NullPointerException if called;
    * consider throwing UnsupportedOperationException instead.
    *
    * @param maps one map of field name -> value per record
    * @return currently always null
    */
  override def getPredictionMaps(maps: Seq[Map[String, Any]]): Array[Double] = {
    null
  }


  /**
    * Java-friendly wrapper around [[getPredictionMaps]]: converts the Java input
    * collections to Scala, predicts, and boxes the results back into a Java list.
    * NOTE(review): NPEs today because getPredictionMaps returns null — see above.
    */
  override def predictionRegJavaList(javaMaps: util.List[util.Map[String, Any]]): util.List[lang.Double] = {
    val scalaRes = getPredictionMaps(
      JavaConversions.asScalaBuffer(
        javaMaps.map(
          JavaConversions.mapAsScalaMap)).toSeq)
    scalaRes.toList.map(r => lang.Double.valueOf(r))
  }

  /**
    * Predicts each record and returns the raw probability(xxx) entries without
    * sorting, truncation, or rounding (unlike predictProbabilitiesWithPMML).
    *
    * @param maps one map of field name -> value per record
    * @return one label -> probability map (as Map[String, Any]) per input record
    */
  override def predict(maps: Seq[Map[String, Any]]): Array[Map[String, Any]] = {
    val totalRes = maps.map(dataMap => {
      val arg = dataTypeConvert(dataMap)
      // Collect the results.
      var res = Map[String, Any]()
      for (pred <- _evaluator.evaluate(arg)) {
        // Keep only entries whose key matches probability(xxx) and whose value is numeric.
        val probVal = tool.extractDataByReg(Forecast.PROBABILITY_REG, pred._1.getValue, 1)
        if (probVal.length > 0 && pred._2.isInstanceOf[Number]) {
          res += (probVal -> pred._2.toString.toDouble)
        }
      }
      res
    }).toArray
    totalRes
  }

}
