package org.apache.spark.ml.feature

import org.apache.hadoop.fs.Path
import org.apache.spark.SparkException
import org.apache.spark.ml.feature.FFMNodesModel.FFMNodesModelWriter
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared.HasOutputCol
import org.apache.spark.ml.util._
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Dataset, Row}

import scala.collection.mutable.ArrayBuffer

/**
  * Feature encoder that turns one-hot, discrete (string-array) and continuous columns
  * into FFM (field-aware factorization machine) node strings of the form
  * "fieldIndex:featureIndex:value".
  *
  * @author songhaicheng
  * @since 2019/2/20 10:45
  */
/**
  * Params shared by [[FFMNodesEncoder]] and [[FFMNodesModel]].
  *
  * Input columns are partitioned into three disjoint groups:
  *  - `oneHotCols`: string or numeric columns, one-hot encoded via a learned dictionary;
  *  - `vectorCols`: `Array[String]` columns, each element encoded via a learned dictionary;
  *  - `continuousCols`: numeric columns, passed through as a single feature each.
  */
private[feature] trait FFMNodesParam extends Params with HasOutputCol {

  // NOTE: only the values in FFMNodesEncoder.supportedHandleInvalids ("error", "handle")
  // are accepted; the description below must stay in sync with that array.
  final val handleInvalid: Param[String] = new Param[String](this, "handleInvalid",
    "how to handle invalid data (unseen labels or NULL values). " +
    "Options are 'error' (throw an error) or 'handle' (put invalid data in a " +
    "reserved extra bucket per column, at index vocabularySize).",
    ParamValidators.inArray(FFMNodesEncoder.supportedHandleInvalids))

  setDefault(handleInvalid, FFMNodesEncoder.ERROR_INVALID)

  /** @return current value of [[handleInvalid]] */
  def getHandleInvalid: String = $(handleInvalid)

  // Columns to one-hot encode; each must be string or numeric typed.
  final val oneHotCols: StringArrayParam = new StringArrayParam(this, "oneHotCols",
    "使用 OntHot 编码进行处理的列，数据类型必须为数字或字符类型")

  setDefault(oneHotCols, Array[String]())

  /** @return current value of [[oneHotCols]] */
  def getOneHotCols: Array[String] = $(oneHotCols)

  // Discrete-value columns; each must be ArrayType(StringType).
  final val vectorCols: StringArrayParam = new StringArrayParam(this, "vectorCols",
    "离散值列，数据类型必须为字符数组 Array[String]")

  setDefault(vectorCols, Array[String]())

  /** @return current value of [[vectorCols]] */
  def getVectorCols: Array[String] = $(vectorCols)

  // Continuous-value columns; each must be numeric typed.
  final val continuousCols: StringArrayParam = new StringArrayParam(this, "continuousCols",
    "连续变量列，数据类型必须为数字类型")

  setDefault(continuousCols, Array[String]())

  /** @return current value of [[continuousCols]] */
  def getContinuousCols: Array[String] = $(continuousCols)

  /**
    * Validates that at least one input column is configured, that the three column
    * groups are disjoint and do not clash with `outputCol`, and that each column has
    * the data type its group requires; then appends `outputCol` as ArrayType(StringType).
    *
    * @param schema input schema to validate
    * @return schema with the output column appended
    */
  protected def validateAndTransformSchema(schema: StructType): StructType = {
    val colsLen = $(oneHotCols).length + $(vectorCols).length + $(continuousCols).length
    require(colsLen > 0, "none will be encoded")
    require(!($(oneHotCols).contains($(outputCol)) ||
      $(vectorCols).contains($(outputCol)) ||
      $(continuousCols).contains($(outputCol))),
      s"outputCol 输出列名 $getOutputCol 已存在")
    // A column may belong to exactly one of the three groups.
    require($(oneHotCols).union($(vectorCols)).union($(continuousCols)).distinct.length == colsLen,
      "一列只能出现在一个参数中")

    val oneHotInputFields = $(oneHotCols).map(schema(_))
    oneHotInputFields.foreach(f => require(f.dataType == StringType || f.dataType.isInstanceOf[NumericType],
      s"OneHot 列数据类型必须是字符串或数字类型，但列 ${f.name} 是 ${f.dataType} 类型"))

    val vectorInputFields = $(vectorCols).map(schema(_))
    vectorInputFields.foreach(f => require(f.dataType == ArrayType(StringType),
      s"离散值列数据类型必须是字符数组 ArrayType(StringType)，但列 ${f.name} 是 ${f.dataType} 类型"))

    val continuousInputField = $(continuousCols).map(schema(_))
    continuousInputField.foreach(f => require(f.dataType.isInstanceOf[NumericType],
      s"连续变量列类型必须是数字类型，但列 ${f.name} 是 ${f.dataType} 类型"))
    SchemaUtils.appendColumn(schema, $(outputCol), ArrayType(StringType))

  }

}

/**
  * Estimator that learns, for every configured input column, a field index, a starting
  * feature index and (for dictionary-based columns) a frequency-ordered vocabulary,
  * producing an [[FFMNodesModel]].
  */
class FFMNodesEncoder (
                        override val uid: String) extends Estimator[FFMNodesModel]
  with FFMNodesParam with DefaultParamsWritable {

  def this() = this(Identifiable.randomUID("ffm_node"))

  def setHandleInvalid(value: String): this.type = set(handleInvalid, value)

  def setOneHotCols(value: Array[String]): this.type = set(oneHotCols, value)

  def setVectorCols(value: Array[String]): this.type = set(vectorCols, value)

  def setContinuousCols(value: Array[String]): this.type = set(continuousCols, value)

  def setOutputCol(value: String): this.type = set(outputCol, value)

  /**
    * Scans the dataset once per configured column to build per-column dictionaries
    * and assign contiguous field/feature indices (one-hot columns first, then
    * discrete columns, then continuous columns).
    *
    * @param dataset training data
    * @return fitted [[FFMNodesModel]] carrying the field/start/vocabulary maps
    */
  override def fit(dataset: Dataset[_]): FFMNodesModel = {
    transformSchema(dataset.schema, logging = true)

    // Per-column field index / starting feature index / label dictionary.
    var field = Map[String, Int]()
    var start = Map[String, Int]()
    var vocabulary = Map[String, Array[String]]()
    // Running field index and running feature-index offset.
    var fieldIdx = 0
    var startAcc = 0

    // Record one column's metadata, then advance the running counters by `width`
    // (the number of feature indices the column occupies).
    def register(colName: String, labels: Array[String], width: Int): Unit = {
      field += (colName -> fieldIdx)
      start += (colName -> startAcc)
      vocabulary += (colName -> labels)
      fieldIdx += 1
      startAcc += width
    }

    // When handleInvalid == "handle", reserve one extra feature slot per column
    // for values unseen during training.
    def width(labels: Array[String]): Int = getHandleInvalid match {
      case FFMNodesEncoder.HANDLE_INVALID => labels.length + 1
      case _ => labels.length
    }

    // Distinct values ordered by descending frequency (most frequent first).
    def byFrequency(counts: scala.collection.Map[String, Long]): Array[String] =
      counts.toSeq.sortBy(-_._2).map(_._1).toArray

    // One-hot columns: dictionary over the distinct (non-null) scalar values.
    for (oneHotCol <- $(oneHotCols)) {
      val labels = byFrequency(
        dataset.na.drop(Array(oneHotCol)).select(col(oneHotCol))
          .rdd
          .map(_.get(0).toString)
          .countByValue())
      register(oneHotCol, labels, width(labels))
    }
    // Discrete (array-valued) columns: each row contributes a variable number of values.
    for (vectorCol <- $(vectorCols)) {
      val labels = byFrequency(
        dataset.na.drop(Array(vectorCol)).select(col(vectorCol))
          .rdd
          .flatMap(_.getAs[Seq[String]](0))
          .countByValue())
      register(vectorCol, labels, width(labels))
    }
    // Continuous columns: no dictionary, exactly one feature index each.
    for (continuousCol <- $(continuousCols)) {
      register(continuousCol, Array[String](), 1)
    }
    copyValues(new FFMNodesModel(uid, field, start, vocabulary))
  }

  override def copy(extra: ParamMap): Estimator[FFMNodesModel] = defaultCopy(extra)

  override def transformSchema(schema: StructType): StructType = validateAndTransformSchema(schema)
}

object FFMNodesEncoder extends DefaultParamsReadable[FFMNodesEncoder] {
  private[feature] val ERROR_INVALID: String = "error"
  private[feature] val HANDLE_INVALID: String = "handle"
  // Accepted values of the handleInvalid param.
  private[feature] val supportedHandleInvalids: Array[String] =
    Array(ERROR_INVALID, HANDLE_INVALID)

  // BUG FIX: the companion of an Estimator must read back the Estimator itself.
  // The previous DefaultParamsReadable[FFMNodesModel] failed at runtime because
  // DefaultParamsReader instantiates via a single-String-uid constructor, which
  // FFMNodesModel does not have (and fitted models are read via FFMNodesModel.load).
  override def load(path: String): FFMNodesEncoder = super.load(path)
}


/**
  * Model produced by [[FFMNodesEncoder]].
  *
  * @param field      field index per input column
  * @param start      starting feature index per input column
  * @param vocabulary frequency-ordered label dictionary per input column
  *                   (empty array for continuous columns)
  */
class FFMNodesModel (
                      override val uid: String,
                      val field: Map[String, Int],
                      val start: Map[String, Int],
                      val vocabulary: Map[String, Array[String]])
  extends Model[FFMNodesModel] with FFMNodesParam with MLWritable {

  def setHandleInvalid(value: String): this.type = set(handleInvalid, value)

  def setOneHotCols(value: Array[String]): this.type = set(oneHotCols, value)

  def setVectorCols(value: Array[String]): this.type = set(vectorCols, value)

  def setContinuousCols(value: Array[String]): this.type = set(continuousCols, value)

  def setOutputCol(value: String): this.type = set(outputCol, value)

  override def copy(extra: ParamMap): FFMNodesModel = {
    val copied = new FFMNodesModel(uid, field, start, vocabulary)
    copyValues(copied, extra).setParent(parent)
  }

  override def write: FFMNodesModelWriter = new FFMNodesModelWriter(this)

  /**
    * Appends `outputCol` containing the FFM node strings
    * ("fieldIndex:featureIndex:value") built from the configured columns.
    *
    * @throws SparkException on an unknown column inside the UDF, or on an unseen
    *                        label/word when handleInvalid == "error"
    */
  override def transform(dataset: Dataset[_]): DataFrame = {
    transformSchema(dataset.schema, logging = true)

    // Whether unseen labels/words are mapped to the reserved extra index.
    val handle = getHandleInvalid.equals(FFMNodesEncoder.HANDLE_INVALID)

    // Hoist per-row work out of the UDF: O(1) column-group membership tests and
    // label -> index maps. The previous Array.contains + indexOf performed two
    // O(dictionary) scans per value per row.
    val oneHotSet = $(oneHotCols)<!-- -->.toSet
    val vectorSet = $(vectorCols).toSet
    val continuousSet = $(continuousCols).toSet
    val labelIndex: Map[String, Map[String, Int]] =
      vocabulary.map { case (colName, labels) => colName -> labels.zipWithIndex.toMap }

    val getNodeVal = udf { row: Row => {
      val res = new ArrayBuffer[String]()
      for (fieldName <- row.schema.fieldNames) {
        if (!field.isDefinedAt(fieldName)) {
          throw new SparkException(s"Invalid column $fieldName")
        }
        val fieldVal = field(fieldName)
        val startVal = start(fieldName)
        val dict = labelIndex(fieldName)
        // Index reserved for unseen values when handleInvalid == "handle".
        val unseenIdx = dict.size
        if (oneHotSet.contains(fieldName)) {
          // OneHot col's FFM Node value = fieldIdx:start+labelIdx:1.0
          val label = row.getAs[Any](fieldName).toString
          dict.get(label) match {
            case Some(idx) => res += s"$fieldVal:${startVal + idx}:1.0"
            case None if handle => res += s"$fieldVal:${startVal + unseenIdx}:1.0"
            case None =>
              throw new SparkException(s"Unseen label: $label. To handle unseen labels, " +
                s"set Param handleInvalid to ${FFMNodesEncoder.HANDLE_INVALID}.")
          }
        } else if (vectorSet.contains(fieldName)) {
          // Discrete col: one node per array element = fieldIdx:start+idx:1.0
          res ++= row.getAs[Seq[String]](fieldName).map { word =>
            dict.get(word) match {
              case Some(idx) => s"$fieldVal:${startVal + idx}:1.0"
              case None if handle => s"$fieldVal:${startVal + unseenIdx}:1.0"
              case None =>
                throw new SparkException(s"Unseen word: $word. To handle unseen words, " +
                  s"set Param handleInvalid to ${FFMNodesEncoder.HANDLE_INVALID}.")
            }
          }
        } else if (continuousSet.contains(fieldName)) {
          // Continuous col's FFM Node value = fieldIdx:start:v
          val v = row.getAs[Any](fieldName).toString.toDouble
          res += s"$fieldVal:$startVal:$v"
        } else {
          throw new SparkException(s"Invalid column $fieldName")
        }
      }
      res.toArray
    }}
    dataset.withColumn($(outputCol),
      getNodeVal(struct(($(oneHotCols) ++ $(vectorCols) ++ $(continuousCols)).map(dataset.col): _*)))
  }

  override def transformSchema(schema: StructType): StructType = {
    validateAndTransformSchema(schema)
  }
}

object FFMNodesModel extends MLReadable[FFMNodesModel] {
  private[FFMNodesModel]
  class FFMNodesModelWriter(instance: FFMNodesModel) extends MLWriter {
    // Parquet row layout for the fitted state.
    private case class Data(field: Map[String, Int],
                            start: Map[String, Int],
                            vocabulary: Map[String, Array[String]])

    /**
      * Persists the param metadata plus the fitted field/start/vocabulary maps
      * as a single-row parquet file under `path`/data.
      * @param path target directory
      */
    override protected def saveImpl(path: String): Unit = {
      DefaultParamsWriter.saveMetadata(instance, path, sc)
      val payload = Data(instance.field, instance.start, instance.vocabulary)
      val dataPath = new Path(path, "data").toString
      sparkSession.createDataFrame(Seq(payload)).repartition(1).write.parquet(dataPath)
    }
  }

  private class FFMNodesModelReader extends MLReader[FFMNodesModel] {
    private val className = classOf[FFMNodesModel].getName

    /**
      * Restores a model from the metadata and the single-row parquet file
      * written by [[FFMNodesModelWriter]].
      * @param path source directory
      * @return reconstructed [[FFMNodesModel]]
      */
    override def load(path: String): FFMNodesModel = {
      val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
      val dataPath = new Path(path, "data").toString
      val dataRow = sparkSession.read.parquet(dataPath)
        .select("field", "start", "vocabulary")
        .head()
      val fieldMap = dataRow.getAs[Map[String, Int]](0)
      val startMap = dataRow.getAs[Map[String, Int]](1)
      // Parquet hands the dictionaries back as Seq[String]; convert to arrays.
      val vocabularyMap = dataRow.getAs[Map[String, Seq[String]]](2)
        .map { case (colName, labels) => colName -> labels.toArray }
      val model = new FFMNodesModel(metadata.uid, fieldMap, startMap, vocabularyMap)
      DefaultParamsReader.getAndSetParams(model, metadata)
      model
    }
  }

  override def read: MLReader[FFMNodesModel] = new FFMNodesModelReader

  override def load(path: String): FFMNodesModel = super.load(path)
}