package com.feidee.fd.sml.algorithm.forecast

import java.io.{File, FileInputStream}

import com.feidee.fd.sml.algorithm.util.Log
import com.feidee.fdhadoop.hdfs.HdfsUtils
import com.feidee.fdspark.transformer._
import ml.combust.bundle.BundleFile
import ml.combust.mleap.spark.SparkSupport._
import org.apache.spark.ml.bundle.SparkBundleContext
import org.apache.spark.ml.classification.FFMModel
import org.apache.spark.ml.feature.{IndexToString, StringIndexerModel}
import org.apache.spark.ml.linalg.{Vector => Vec}
import org.apache.spark.ml.{Pipeline, PipelineModel, Transformer}
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession, functions}
import org.jpmml.sparkml.PMMLBuilder
import resource.managed

import scala.collection.mutable.ArrayBuffer

/**
  * @Author: xiongjun
  * @Date: 2019/4/25 18:54
  */
class SparkForecast(override val spark: SparkSession, val fieldInfo: Map[String, Array[String]], override val paths: String*) extends Forecast[PipelineModel] with Log {
  // Pipeline models loaded eagerly from `paths` at construction time; applied in order during prediction.
  var models: Array[PipelineModel] = load()
  // Spark schema derived from `fieldInfo`; empty when no field info was supplied.
  val sparkStructType = constructSparkStructFromFieldInfo

  /**
    * Builds a Spark StructType from `fieldInfo`.
    *
    * Each `fieldInfo` entry maps a field name to an array whose first element is the
    * field's declared type (see FieldInfo); sequence-like types are represented as strings.
    *
    * @return the schema, empty when `fieldInfo` is null or empty
    * @throws Exception when a field declares an unsupported type
    */
  private[algorithm] def constructSparkStructFromFieldInfo(): StructType = {
    val structFields = new ArrayBuffer[StructField]()
    if (fieldInfo != null && fieldInfo.nonEmpty) {
      // The training-data field metadata is stored in the first PipelineModel
      val trainingFields = new StageFinder[MetaStorage](models.head).findWithOrder().get.getFields
      require(fieldInfo.size == trainingFields.length, "upload fieldInfo with trainSet fieldInfo unmatch")
      // foreach, not map: we iterate purely for the side effect of appending to structFields
      fieldInfo.foreach { case (name, info) =>
        info.head match {
          case FieldInfo.INT =>
            structFields += StructField(name, IntegerType)
          case FieldInfo.DOUBLE =>
            structFields += StructField(name, DoubleType)
          case FieldInfo.STRING | FieldInfo.SEQUENCE | FieldInfo.SEQUENCE_INT | FieldInfo.SEQUENCE_FLOAT =>
            structFields += StructField(name, StringType)
          case _ =>
            throw new Exception(s"system got data type is ${info.head} ,but only accept int/double/string/sequence")
        }
      }
    }
    StructType(structFields)
  }

  /**
    * Converts a Map into a Row, following the field order and data types of `structType`.
    *
    * @param dataMap row data keyed by field name
    * @param structType schema driving field order and type coercion
    * @return the assembled Row; values of unsupported types become null
    */
  private[algorithm] def mapToRow(dataMap: Map[String, Any], structType: StructType): Row = {
    // Coerce each value to its declared column type before assembling the Row
    val cells = structType.fields.map { field =>
      val raw = dataMap(field.name)
      field.dataType match {
        case _: StringType  => raw.toString
        case _: IntegerType => raw.toString.toInt
        case _: DoubleType  => raw.toString.toDouble
        case _: BooleanType => raw.toString.toBoolean
        case _              => null
      }
    }
    Row.fromSeq(cells)
  }

  /**
    * Converts a Map into a Row following `sparkStructType` (the schema built from `fieldInfo`).
    *
    * A missing or null value falls back to the default stored as the second element of
    * the field's `fieldInfo` array; a missing default raises an Exception.
    *
    * @param dataMap row data keyed by field name
    * @return the assembled Row
    * @throws Exception when a value is absent without a configured default, or the field type is unknown
    */
  private[algorithm] def mapToRow(dataMap: Map[String, Any]): Row = {
    require(sparkStructType != null && sparkStructType.nonEmpty, "sparkStructType is null or empty,please check field info")
    val values = sparkStructType.fields.map(_.name).map(field => {
      // Dispatch on the declared type first so an unknown type fails fast,
      // exactly as the original per-type branches did.
      fieldInfo(field).head match {
        case FieldInfo.STRING | FieldInfo.SEQUENCE | FieldInfo.SEQUENCE_INT | FieldInfo.SEQUENCE_FLOAT =>
          resolveRawValue(dataMap, field)
        case FieldInfo.INT =>
          resolveRawValue(dataMap, field).toInt
        case FieldInfo.DOUBLE =>
          resolveRawValue(dataMap, field).toDouble
        case _ =>
          throw new Exception(s"not found field $field's data type")
      }
    })
    Row.fromSeq(values)
  }

  /**
    * Returns the string form of `field`'s value from `dataMap`, falling back to the
    * default stored as the second element of the field's `fieldInfo` entry.
    *
    * @throws Exception when the value is missing/null and no default is configured
    */
  private def resolveRawValue(dataMap: Map[String, Any], field: String): String = {
    dataMap.get(field) match {
      case Some(value) if value != null => value.toString
      case _ =>
        // A fieldInfo entry of length 2 means (type, default); any other length has no default
        if (fieldInfo(field).length != 2) {
          throw new Exception(s"data not contains $field,while this field not default value in fieldInfo")
        }
        fieldInfo(field).last
    }
  }

  /**
    * Infers a StructType from a Map: keys become field names, and each value's runtime
    * type determines the column type.
    *
    * @param dataMap sample row data
    * @return the inferred schema; unsupported value types map to NullType with a warning
    */
  private[algorithm] def parseStructFromMap(dataMap: Map[String, Any]): StructType = {
    // Inspect each value's runtime type to pick the column type
    val inferred = dataMap.keySet.toSeq.map { key =>
      dataMap(key) match {
        case _: String  => StructField(key, StringType)
        case _: Int     => StructField(key, IntegerType)
        case _: Double  => StructField(key, DoubleType)
        case _: Boolean => StructField(key, BooleanType)
        case _ =>
          logWarning("format of value only supports String, Int, Double and Boolean," +
            s" thus convert $key to null")
          StructField(key, NullType)
      }
    }
    StructType(inferred)
  }

  /**
    * Converts a sequence of Maps into a DataFrame: keys become column names, values become cells.
    *
    * Uses the schema built from `fieldInfo` when available; otherwise infers the schema
    * from the first map in `maps`.
    *
    * @param maps rows expressed as field-name -> value maps
    * @return a DataFrame with one row per input map
    */
  def mapsToDF(maps: Seq[Map[String, Any]]): DataFrame = {
    val (schema, rows) = if (sparkStructType != null && sparkStructType.nonEmpty) {
      (sparkStructType, maps.map(mapToRow))
    } else {
      // Without field info the schema must be inferred from real data, so at least one row is required
      require(maps.nonEmpty, "maps can't be empty when no field info is configured")
      val inferred = parseStructFromMap(maps.head)
      (inferred, maps.map(mapToRow(_, inferred)))
    }
    spark.createDataFrame(spark.sparkContext.parallelize(rows), schema)
  }

  /**
    * Runs prediction by applying each PipelineModel in order to the input DataFrame.
    *
    * Columns present in the training data but missing from `dataFrame` are filled with
    * empty strings; extra input columns are kept but trigger a warning.
    *
    * @param dataFrame data to predict on
    * @param model pipeline models to apply, defaults to the loaded models
    * @return the transformed DataFrame with prediction columns appended
    */
  def predict(dataFrame: DataFrame, model: Array[PipelineModel] = models): DataFrame = {
    val startTime = System.currentTimeMillis()
    // The training-data field metadata is stored in the first PipelineModel
    val trainingFields = new StageFinder[MetaStorage](model.head).findWithOrder().get.getFields
    val predictingFields = dataFrame.schema.fieldNames
    // At least one incoming column must have appeared in the training data
    require(
      trainingFields.intersect(predictingFields).nonEmpty,
      s"待预测的特征列[${predictingFields.mkString(",")}]未出现在训练数据中[${trainingFields.mkString(",")}]")
    // Columns unknown to the training data are tolerated, but warn about them
    val diffFields = predictingFields.diff(trainingFields)
    if (diffFields.nonEmpty) {
      logWarning(s"传入字段[${diffFields.mkString(",")}]未出现在训练数据中[${trainingFields.mkString(",")}]")
    }
    // Fill training columns absent from the input with empty strings
    val data = trainingFields
      .diff(predictingFields)
      .foldLeft(dataFrame)((cur, field) => cur.withColumn(field, functions.lit("")))
    // `m` instead of `model` so the fold parameter no longer shadows the method parameter
    val res = model.foldLeft(data)((cur, m) => m.transform(cur))
    logInfo(s"模型预测耗时: ${System.currentTimeMillis() - startTime}ms")
    res
  }

  /**
    * Loads one PipelineModel from every configured path, preserving order.
    *
    * @return the loaded models
    */
  override def load(): Array[PipelineModel] = {
    require(paths.nonEmpty, "paths can't be null")
    paths.toArray.map(PipelineModel.load)
  }

  /**
    * Convenience entry point: builds the DataFrame from raw maps, then predicts
    * with the loaded models (auto-completing the required data format).
    *
    * @param maps rows expressed as field-name -> value maps
    * @return the prediction DataFrame
    */
  def predictDF(maps: Seq[Map[String, Any]]): DataFrame = predict(mapsToDF(maps))

  /** Predicts on `data` using all of the loaded models. */
  def predict(data: DataFrame): DataFrame = predict(data, models)

  /**
    * Appends a column holding the topn "label:probability" pairs for each prediction,
    * sorted by probability in descending order.
    *
    * @param prediction already-predicted raw DataFrame
    * @param topn number of entries to keep; a non-positive value keeps all
    * @return the input DataFrame plus the label-probability column
    */
  override def computeProbability(prediction: DataFrame, topn: Int): DataFrame = {
    // Spark pipeline
    val lastMeta = tool.getMetaFromModel(models.last)
    val labels = new StageFinder[StringIndexerModel](models.last).findWithOrder().get.labels
    // Turn each probability vector into a sorted, optionally truncated "label:prob" string
    val label2Probability = udf { probability: Vec =>
      val probArr = probability.toArray
      var labelProbs = Map[String, Double]()
      labels.indices.foreach(i => labelProbs += (labels(i) -> probArr(i)))
      val sorted = labelProbs.toSeq.sortWith(_._2 > _._2).map(kv => s"${kv._1}:${kv._2}")
      val kept = if (topn > 0) sorted.take(topn) else sorted
      kept.mkString(", ")
    }
    prediction.withColumn(
      tool.renameDuplicatedColName(Forecast.LABEL2PROBABILITY_COL, lastMeta.getFields),
      label2Probability(prediction.col(lastMeta.getParameters("probabilityCol").toString))
    )
  }

  /**
    * Extracts a label -> probability map for every row of a Spark prediction result.
    *
    * Probabilities are sorted descending, truncated to topn, and rounded to 8 decimals.
    *
    * @param prediction prediction DataFrame (must not be null)
    * @param topn number of labels to keep per row
    * @return one label -> probability map per input row
    */
  private[algorithm] def computeProbabilityMap(prediction: DataFrame, topn: Int): Array[Map[String, Double]] = {
    require(prediction != null, "预测结果为空")
    val lastMeta = tool.getMetaFromModel(models.last)
    val labels = new StageFinder[StringIndexerModel](models.last).findWithOrder().get.labels
    val startTime = System.currentTimeMillis()

    // Pull the raw probability vectors back to the driver
    val probArrs = prediction
      .select(lastMeta.getParameters("probabilityCol").toString)
      .rdd
      .map(_.get(0).asInstanceOf[Vec].toArray)
      .collect()

    val res = probArrs.map { probArr =>
      // Pair each label with its probability
      var labelProbs = Map[String, Double]()
      labels.indices.foreach(j => labelProbs += (labels(j) -> probArr(j)))
      // Sort by probability descending and keep topn
      labelProbs = tool.sortMapValAndTakeTopn(labelProbs, topn)
      // Round to 8 decimal places by re-inserting each entry (keeps the map's own update semantics)
      labelProbs.foreach(r => labelProbs += (r._1 -> r._2.formatted("%.8f").toDouble))
      labelProbs
    }
    logInfo(s"计算概率耗时: ${System.currentTimeMillis() - startTime}ms")
    res
  }


  /**
    * Predicts with the loaded models and returns per-row label -> probability maps.
    *
    * @param maps rows expressed as field-name -> value maps
    * @param topn number of top labels to keep (applied on the non-FFM path)
    * @param preModel unused here; kept for interface compatibility
    * @return one label -> probability map per input row
    */
  override def predictProbabilityMaps(maps: Seq[Map[String, Any]], topn: Int, preModel: Array[Forecast[Any]] = null): Array[Map[String, Double]] = {
    // In the probability-prediction use case the last model is a classification component
    val ffmModel = new StageFinder[FFMModel](models.last).findWithOrder()
    if (ffmModel.isEmpty) {
      computeProbabilityMap(predictDF(maps), topn)
    } else {
      val ffm = ffmModel.get
      val labels = new StageFinder[StringIndexerModel](models.last).findWithOrder().get.labels
      // FFM model: run the upstream stages on Spark, then score locally for better latency
      val features = predict(mapsToDF(maps), models.dropRight(1))
        .select(ffm.getFeaturesCol)
        .rdd
        .map(_.getSeq[String](0))
        .collect()
      // NOTE(review): `topn` is not applied on this branch — confirm whether that is intended
      features.map { feature =>
        val p = ffm.raw2probabilityInPlace(ffm.predictRaw(feature))
        Range(0, p.size).map(i => labels(i) -> p(i)).toMap
      }
    }
  }

  /**
    * Predicts with the loaded regression model(s) and returns real-valued results.
    *
    * Not implemented yet: invoking this method throws scala.NotImplementedError (`???`).
    *
    * @param maps rows expressed as field-name -> value maps
    * @return one predicted value per input row
    */
  override def getPredictionMaps(maps: Seq[Map[String, Any]]): Array[Double] = ???


  /**
    * Converts the loaded Spark PipelineModel(s) into a PMML model file and uploads it
    * to the given HDFS path.
    *
    * @param destPath destination HDFS path of the PMML file
    * @return true when conversion and upload both succeed
    */
  def convertSparkToPMML(destPath: String): Boolean = {
    require(models.nonEmpty, "please load spark model(s) first")
    // The original training data location is stored as "input_pt" in the first model's MetaStorage
    val inputPath = tool.getMetaFromModel(models.head).getParameters("input_pt")

    require(inputPath != null, "input_pt is null")
    val inputPathStr = inputPath.toString

    require(HdfsUtils.fileExists(inputPathStr), s"invalid input_pt: $inputPathStr")
    val inputData = spark.read.load(inputPathStr)

    // Collect every transformer stage from all loaded models, in order
    val transformers: Array[Transformer] = models.flatMap(_.stages)

    // Refit a single pipeline so PMMLBuilder can serialize it against the training schema
    val pipelineModel = new Pipeline().setStages(transformers).fit(inputData)
    val localPMML = new File(
      s"${System.getProperty("java.io.tmpdir")}/${tool.getFileName(destPath)}_${System.currentTimeMillis}")
    new PMMLBuilder(inputData.schema, pipelineModel).buildFile(localPMML)

    // Upload the local PMML file and always close the stream (previously leaked)
    val in = new FileInputStream(localPMML)
    try {
      HdfsUtils.uploadModel(destPath, in)
    } finally {
      in.close()
    }
    require(HdfsUtils.fileExists(destPath), "PMML 模型文件上传到 HDFS 失败")

    // Remove the local temp file; failure to delete is non-fatal
    if (!localPMML.delete()) {
      log.warn(s"删除本地临时 PMML 模型文件失败: ${localPMML.getPath}")
    }
    log.info(s"已将 Spark Pipeline 模型转置成 PMML 模型文件格式：$destPath")
    true
  }

  /**
    * Converts the loaded Spark PipelineModel(s) into an MLeap bundle file and uploads it
    * to the given HDFS path. (Name kept as-is for caller compatibility, despite the typo.)
    *
    * @param destPath destination HDFS path of the MLeap bundle
    * @return true when conversion and upload both succeed
    */
  def convertSparkToBoundle(destPath: String): Boolean = {
    require(models.nonEmpty, "please load spark model(s) first")
    // The original training data location is stored as "input_pt" in the first model's MetaStorage
    val inputPath = tool.getMetaFromModel(models.head).getParameters("input_pt")

    require(inputPath != null, "input_pt is null")
    val inputPathStr = inputPath.toString

    require(HdfsUtils.fileExists(inputPathStr), s"invalid input_pt: $inputPathStr")
    val inputData = spark.read.load(inputPathStr)

    // Collect every transformer stage; ColSelector stages are excluded
    // (presumably not supported by the MLeap bundle — TODO confirm)
    val transformers = models.flatMap(_.stages).filter(tr => !tr.isInstanceOf[ColSelector])
    val pipelineModel = new Pipeline().setStages(transformers).fit(inputData)

    // Rewire labels: after a ML component's MetaStorage, remember the StringIndexerModel
    // labels and push them into the following IndexToString stage; clear ColEliminator drops.
    var labels = Array.empty[String]
    var fieldName = ""
    var isMlComponent = false
    val mlComponents = Seq(ModelType.Algorithm_Classification_Prob, ModelType.Algorithm_Regression, ModelType.Clustering)
    for (transformer <- pipelineModel.stages) {
      transformer match {
        case meta: MetaStorage =>
          if (mlComponents.contains(meta.getModelType)) {
            isMlComponent = true
            // NOTE(review): fieldName is assigned but never read — kept because
            // getParameters("labelCol") may intentionally fail fast when the key is absent
            fieldName = meta.getParameters("labelCol").toString
          }
        case sim: StringIndexerModel =>
          if (isMlComponent) {
            labels = sim.labels
          }
        case its: IndexToString =>
          if (isMlComponent && labels.length > 0) {
            its.setLabels(labels)
            isMlComponent = false
          }
        case ce: ColEliminator =>
          ce.setDrops(new Array[String](0))
        case _ =>
      }
    }

    val localMleapPath = s"${System.getProperty("java.io.tmpdir")}/" +
      s"${tool.getFileName(destPath)}_${System.currentTimeMillis}.zip"
    val sbc = SparkBundleContext().withDataset(pipelineModel.transform(inputData))
    for (bf <- managed(BundleFile(s"jar:file:$localMleapPath"))) {
      pipelineModel.writeBundle.save(bf)(sbc).get
    }
    // Upload the bundle and always close the stream (previously leaked)
    val in = new FileInputStream(localMleapPath)
    try {
      HdfsUtils.uploadModel(destPath, in)
    } finally {
      in.close()
    }
    require(HdfsUtils.fileExists(destPath), "MLEAP 模型文件上传到 HDFS 失败")

    // Remove the local temp file; failure to delete is non-fatal
    if (!new File(localMleapPath).delete()) {
      log.warn(s"删除本地临时 MLEAP 模型文件失败: $localMleapPath")
    }
    log.info(s"已将 Spark Pipeline 模型转置成 MLEAP 模型文件格式：$destPath")
    true
  }

  /**
    * Predicts on raw map rows and returns every output column as a field-name -> value map.
    *
    * When the last model contains an FFM stage, the upstream stages run on Spark and the
    * FFM scoring happens locally, returning label -> probability maps instead.
    *
    * @param maps rows expressed as field-name -> value maps
    * @return one map per input row
    */
  override def predict(maps: Seq[Map[String, Any]]): Array[Map[String, Any]] = {
    val ffmModel = new StageFinder[FFMModel](models.last).findWithOrder()

    if (ffmModel.isDefined) {
      val ffm = ffmModel.get
      val labels = new StageFinder[StringIndexerModel](models.last).findWithOrder().get.labels
      // FFM model: run the upstream stages on Spark, then score locally for better latency
      val features = predict(mapsToDF(maps), models.dropRight(1))
        .select(ffm.getFeaturesCol)
        .rdd
        .map(_.getSeq[String](0))
        .collect()
      features.map { feature =>
        val p = ffm.raw2probabilityInPlace(ffm.predictRaw(feature))
        // Explicit Map[String, Any] so the Array element type matches the return type (Array is invariant)
        val labelProbs: Map[String, Any] = Range(0, p.size).map(i => labels(i) -> p(i)).toMap
        labelProbs
      }
    } else {
      val df = predictDF(maps)
      val fields = df.schema.fieldNames
      // collect() directly — the original `.select("*")` was a no-op
      df.collect().map { row =>
        fields.zipWithIndex.map { case (name, idx) => name -> row.get(idx) }.toMap
      }
    }
  }
}
