package org.apache.spark.ml.classification

import breeze.linalg.{norm, DenseVector => BDV}
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.ml.classification.FFMModel.FFMModelWriter
import org.apache.spark.ml.linalg.{BLAS, Vector, VectorUDT, Vectors}
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.util._
import org.apache.spark.mllib.classification.FFMWithAdag
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Dataset, Row}

import scala.collection.mutable.ArrayBuffer
import scala.util.Random

/**
  * Field-aware Factorization Machine (FFM) classifier for Spark ML.
  *
  * @Author songhaicheng
  * @Date 2019/2/21 15:56
  * @Description Spark ML estimator/model pair implementing field-aware
  *              factorization machines for binary classification.
  * @Reviewer
  */
/**
  * Parameters shared by the [[FFM]] estimator and [[FFMModel]].
  *
  * Note: the param doc strings below are runtime metadata (surfaced by
  * `explainParams`) and are intentionally left as authored.
  */
private[classification] trait FFMParams extends ProbabilisticClassifierParams
  with HasMaxIter with HasTol with HasRegParam with HasAggregationDepth {


  // Whether to normalize each instance before computing the FFM value.
  final val normalization: BooleanParam = new BooleanParam(this, "normalization",
    "是否正则化实例")

  setDefault(normalization, true)

  def getNormalization: Boolean = $(normalization)

  // Optimization algorithm; validated against FFM.OPTIMIZER ("sgd" / "adag").
  final val optimizer: Param[String] = new Param[String](this, "optimizer",
    "优化方法，支持 sgd 和 adag", ParamValidators.inArray(FFM.OPTIMIZER))

  setDefault(optimizer, FFM.SGD)

  def getOptimizer: String = $(optimizer)

  // Dimension of the latent factor vectors; must be >= 2.
  final val k: IntParam = new IntParam(this, "k",
    "隐藏向量空间长度，大于等于 2", ParamValidators.gtEq(2))

  setDefault(k, 2)

  def getK: Int = $(k)

  // Number of partitions to repartition the training data to; 0 keeps the
  // input partitioning (see FFM.train).
  final val partitions: IntParam = new IntParam(this, "partitions",
    "分区数", ParamValidators.gtEq(0))

  setDefault(partitions, 0)

  def getPartitions: Int = $(partitions)

  // Step size (learning rate) for gradient updates; must be >= 0.0.
  final val stepSize: DoubleParam = new DoubleParam(this, "stepSize",
    "迭代步长，大于等于 0.0", ParamValidators.gtEq(0.0))

  setDefault(stepSize, 0.01)

  def getStepSize: Double = $(stepSize)

  // Fraction of training data sampled per iteration, in (0, 1].
  final val subsamplingRate: DoubleParam = new DoubleParam(this, "subsamplingRate",
    "训练数据抽样比例，(0, 1]",
    ParamValidators.inRange(0, 1, lowerInclusive = false, upperInclusive = true))

  setDefault(subsamplingRate, 1.0)

  // NOTE(review): getter name ("miniBatch") does not match the param name
  // ("subsamplingRate"); kept as-is for source compatibility.
  def getMiniBatch: Double = $(subsamplingRate)

  // Input feature type was changed to a custom representation (disabled):
  //def featuresDataType: DataType = ArrayType(StringType)

  /**
    * Validates that the features column is an array of strings and, when
    * fitting, that the label column is numeric; then appends the prediction
    * column to the schema.
    */
  override protected def validateAndTransformSchema(
                                                     schema: StructType,
                                                     fitting: Boolean,
                                                     featuresDataType: DataType): StructType = {
    SchemaUtils.checkColumnType(schema, $(featuresCol), ArrayType(StringType))
    if (fitting) {
      SchemaUtils.checkNumericType(schema, $(labelCol))
    }
    SchemaUtils.appendColumn(schema, $(predictionCol), DoubleType)
  }

}

/** A single FFM input entry: (field id, feature id, feature value). */
case class Node(field: Int, feature: Int, value: Double) {
  // Builds a Node from a Spark Row with "field"/"feature"/"value" columns.
  def this(row: Row) = this(row.getAs[Int]("field"), row.getAs[Int]("feature"), row.getAs[Double]("value"))
}

/**
  * Field-aware Factorization Machine (FFM) binary classifier.
  *
  * Features are expected as `Seq[String]` entries of the form
  * `"field:feature:value"`; labels are mapped to {-1.0, +1.0} internally.
  * Training is delegated to [[FFMWithAdag]]. The private `optimize` /
  * `computeFFM` / `gradientFFM` / `ffm` helpers implement an alternative
  * in-house optimizer that is currently not wired into `train`.
  */
class FFM (
            override val uid: String)
  extends ProbabilisticClassifier[Seq[String], FFM, FFMModel]
    with FFMParams with DefaultParamsWritable with Logging {

  def this() = this(Identifiable.randomUID("ffm"))

  /** Sets the optimizer, one of [[FFM.SGD]] / [[FFM.ADAG]]. Default: "sgd". */
  def setOptimizer(value: String): this.type = set(optimizer, value)
  setDefault(optimizer -> "sgd")

  /** Whether to normalize each instance. Default: true. */
  def setNormalization(value: Boolean): this.type = set(normalization, value)
  setDefault(normalization -> true)

  /** Maximum number of training iterations. Default: 100. */
  def setMaxIter(value: Int): this.type = set(maxIter, value)
  setDefault(maxIter -> 100)

  /** Dimension of the latent factor vectors. Default: 2. */
  def setK(value: Int): this.type = set(k, value)
  setDefault(k -> 2)

  /** Partitions to repartition the training data to; 0 keeps the input partitioning. */
  def setPartitions(value: Int): this.type = set(partitions, value)
  setDefault(partitions -> 0)

  /** Suggested depth for treeAggregate. Default: 2. */
  def setAggregationDepth(value: Int): this.type = set(aggregationDepth, value)
  setDefault(aggregationDepth -> 2)

  /** Convergence tolerance on the weight-vector difference. Default: 0.001. */
  def setConvergenceTol(value: Double): this.type = set(tol, value)
  setDefault(tol -> 0.001)

  /** Fraction of data sampled per iteration, in (0, 1]. Default: 1.0. */
  def setMiniBatch(value: Double): this.type = set(subsamplingRate, value)
  setDefault(subsamplingRate -> 1.0)

  /** Step size (learning rate). Default: 0.01. */
  def setStepSize(value: Double): this.type = set(stepSize, value)
  // BUGFIX: this was `setDefault(stepSize -> 0.0)`, silently overriding the
  // 0.01 default declared in FFMParams and disabling learning unless the
  // caller set the step size explicitly.
  setDefault(stepSize -> 0.01)

  /** L2 regularization parameter. Default: 0.0. */
  def setRegParam(value: Double): this.type = set(regParam, value)
  setDefault(regParam -> 0.0)

  /**
    * Validates the label column and appends the raw-prediction, probability
    * and prediction columns to the schema.
    */
  protected def validateAndTransformSchema(schema: StructType): StructType = {
    SchemaUtils.checkNumericType(schema, $(labelCol), "标签列必须为数字类型")
    StructType(schema.fields :+
      StructField($(rawPredictionCol), new VectorUDT) :+
      StructField($(probabilityCol), new VectorUDT) :+
      StructField($(predictionCol), DoubleType))
  }

  override def copy(extra: ParamMap): FFM = defaultCopy(extra)

  /**
    * Trains an FFM model: parses "field:feature:value" strings into [[Node]]s,
    * derives the field (m) and feature (n) dimensions, validates the optimizer
    * choice and delegates the optimization to [[FFMWithAdag.train]].
    */
  override protected def train(dataset: Dataset[_]): FFMModel = {
    logParams(dataset)
    validateAndTransformSchema(dataset.schema)

    // Map arbitrary numeric labels to {-1.0, +1.0}, as required by the
    // logistic FFM loss.
    val labeling = udf { label: Double => if (label > 0) 1.0 else -1.0 }

    // Parse each "field:feature:value" string into a Node.
    val fetchNodes = udf { strArr: Seq[String] =>
      strArr.map { str =>
        val f = str.split(":")
        Node(f(0).toInt, f(1).toInt, f(2).toDouble)
      }
    }

    val data = dataset
      .select(labeling(dataset.col($(labelCol)).cast(DoubleType)), fetchNodes(dataset.col($(featuresCol))))
      .rdd
      .map(row => (row.getAs[Double](0), row.getSeq[Row](1).map(new Node(_))))

    // Derive m (max field id) and n (max feature id) from the data.
    // NOTE(review): downstream code compares ids with `< m` / `< n`, which
    // assumes 1-based ids in the input — confirm against the data format.
    val (m, n) = data.map(_._2).treeAggregate((0, 0))(
      seqOp = (acc, nodes) =>
        // Guard: `.max` on an instance with no features would throw.
        if (nodes.isEmpty) acc
        else (math.max(nodes.map(_.field).max, acc._1), math.max(nodes.map(_.feature).max, acc._2)),
      combOp = (x1, x2) =>
        // BUGFIX: the feature maximum previously combined x2._1 with x2._2,
        // dropping x1's feature maximum entirely.
        (math.max(x1._1, x2._1), math.max(x1._2, x2._2))
    )

    // Flatten nodes into the tuple layout FFMWithAdag expects.
    var data2 = data.map(d => (d._1, d._2.map(n => (n.field, n.feature, n.value)).toArray))
    if ($(partitions) > 0) {
      data2 = data2.repartition($(partitions))
    }

    // Validate the optimizer choice; FFMWithAdag receives the raw name below.
    $(optimizer) match {
      case FFM.SGD | FFM.ADAG => // valid
      case _ =>
        throw new SparkException(s"无效优化参数 $getOptimizer")
    }

    val weights = FFMWithAdag.train(data2, m, n, (false, false, $(k)), $(maxIter),
      $(stepSize), $(regParam), $(normalization), random = true, $(optimizer), $(aggregationDepth))._2

    copyValues(new FFMModel(uid, 2, n, m, Vectors.dense(weights)))
  }

  override def transformSchema(schema: StructType): StructType = validateAndTransformSchema(schema)

  /**
    * Randomly initializes the latent vectors. In the non-SGD (AdaGrad) layout
    * each block of k factors is followed by k accumulator slots initialized
    * to 1.0, doubling the weight array length.
    */
  private def genInitWeights(
                              m: Int,
                              n: Int,
                              sgd: Boolean): BDV[Double] = {
    val w = if (sgd) {
      new Array[Double](n * m * $(k))
    } else {
      new Array[Double](n * m * $(k) * 2)
    }
    // Scale factors so initial prediction magnitude is independent of k.
    val coef = 1.0 / Math.sqrt($(k))
    val rand = new Random()

    if (sgd) {
      for (i <- 0 until m * n * $(k)) {
        w(i) = coef * rand.nextDouble()
      }
    } else {
      var wIdx = 0
      for (_ <- 0 until m; _ <- 0 until n; i <- 0 until 2 * $(k)) {
        // First k slots per (field, feature): random factor; last k: 1.0.
        w(wIdx) = if (i < $(k)) coef * rand.nextDouble() else 1.0
        wIdx += 1
      }
    }
    BDV(w)
  }

  /**
    * In-house full-batch gradient-descent loop (currently not called by
    * `train`). Iterates until either `maxIter` is reached or the solution
    * difference between consecutive iterations falls below `tol`.
    */
  private def optimize(
                        data: RDD[(Double, Seq[Node])],
                        initWeights: BDV[Double],
                        m: Int,
                        n: Int,
                        sgd: Boolean): BDV[Double] = {
    val numIterations = $(maxIter)
    val stochasticLossHistory = new ArrayBuffer[Double](numIterations)
    // Previous and current weights, kept to measure the solution difference.
    var previousWeights: Option[BDV[Double]] = None
    var currentWeights: Option[BDV[Double]] = None

    var weights = initWeights.copy

    var converged = false
    var i = 0
    // Optimize until convergence or the iteration budget is exhausted.
    while (!converged && i < numIterations) {
      val bcWeights = data.context.broadcast(weights)

      // Accumulate gradient direction, total loss and instance count.
      val (gSum, lSum, miniBatchSize) = data
        .treeAggregate((BDV.zeros[Double](bcWeights.value.length), 0.0, 0L))(
          seqOp = (c, v) => {
            val label = v._1
            val nodes = v._2
            // Instance normalization factor: 1 / (sum of squared values).
            val r = if ($(normalization)) {
              val norm = nodes.map(n => math.pow(n.value, 2)).sum
              1.0 / norm
            } else {
              1.0
            }
            // Strides into the flat weight array; AdaGrad layout doubles them.
            val (align0, align1) = if (sgd) {
              ($(k), m * $(k))
            } else {
              ($(k) * 2, m * $(k) * 2)
            }
            // Computes the loss; the gradient accumulator c._1 is mutated in
            // place inside computeFFM.
            val loss = computeFFM(label, nodes, bcWeights.value, c._1, m, n, r, align0, align1, sgd)

            (c._1, c._2 + loss, c._3 + 1)
          },
          combOp = (c1, c2) => {
            (c1._1 += c2._1, c1._2 + c2._2, c1._3 + c2._3)
          }
        )
      bcWeights.destroy(true)

      if (miniBatchSize > 0) {
        stochasticLossHistory += lSum / miniBatchSize
        // Decaying step size; move weights along the accumulated direction
        // (axpy mutates `weights`' backing array in place).
        val thisIterStepSize = $(stepSize) / math.sqrt(i + 1)
        BLAS.axpy(thisIterStepSize, Vectors.fromBreeze(gSum), Vectors.fromBreeze(weights))
        // (Removed: per-iteration dumps of the full gradient/weight arrays,
        // which flooded stdout/logs on any realistically-sized model.)
        logInfo(s"lSum is $lSum, miniBatchSize is $miniBatchSize")
        println("iter:" + (i + 1) + ",tr_loss:" + lSum / miniBatchSize)
        // Convergence test on consecutive solutions.
        previousWeights = currentWeights
        // BUGFIX: must snapshot with .copy — `weights` is mutated in place by
        // axpy above, so storing the same object made previous == current on
        // every iteration and the convergence test vacuous.
        currentWeights = Some(weights.copy)
        if (previousWeights.isDefined && currentWeights.isDefined) {
          converged = isConverged(previousWeights.get, currentWeights.get, $(tol))
        }
      } else {
        logWarning(s"Iteration ($i/$numIterations). The size of sampled batch is zero")
      }
      i += 1
    }

    logInfo(s"Optimization $getOptimizer finished." +
      s" Last 10 losses ${stochasticLossHistory.takeRight(10).mkString(", ")}")

    weights
  }

  /**
    * Computes the logistic loss of one instance and accumulates its gradient
    * into `gradients` (mutated in place via BLAS.axpy — fromBreeze shares the
    * backing array of a dense breeze vector).
    */
  private def computeFFM(
                          label: Double,
                          nodes: Seq[Node],
                          weights: BDV[Double],
                          gradients: BDV[Double],
                          m: Int,
                          n: Int,
                          r: Double,
                          align0: Int,
                          align1: Int,
                          sgd: Boolean): Double = {
    val weightsArray = weights.toArray

    val t = ffm(nodes, weightsArray, m, n, r, align0, align1)
    // Clamp the margin to [-35, 35] before exponentiating to avoid overflow.
    val expnyt = math.exp(-math.max(math.min(label * t, 35.0), -35.0))
    // kappa = d(loss)/d(t) for the logistic loss on labels {-1, +1}.
    val kappa = -label * expnyt / (1 + expnyt)
    val trLoss = math.log(1 + expnyt)

    // Accumulate this instance's gradient into the shared accumulator.
    BLAS.axpy(1.0, Vectors.fromBreeze(gradientFFM(nodes, weights, m, n, r, align0, align1, kappa)), Vectors.fromBreeze(gradients))

    trLoss
  }

  /**
    * Builds the gradient vector for one instance (pairwise field-aware
    * interactions, with L2 regularization and AdaGrad-style scaling).
    *
    * NOTE(review): `j1` is `feature - 1` here while `ffm` (and the model's
    * predictRaw) use `feature` directly — one of the two indexings is likely
    * off by one; left unchanged pending confirmation.
    * NOTE(review): the accumulator slots (wg*) both receive `g1 * g2`; AdaGrad
    * normally accumulates g1*g1 and g2*g2 respectively — confirm against the
    * reference libffm implementation. Also note slots are assigned, not
    * accumulated, so a repeated index pair overwrites earlier contributions.
    */
  private def gradientFFM(
                           nodes: Seq[Node],
                           weights: BDV[Double],
                           m: Int,
                           n: Int,
                           r: Double,
                           align0: Int,
                           align1: Int,
                           kappa: Double): BDV[Double] = {
    val lambda = $(regParam)
    val eta = $(stepSize)
    val gradients = new Array[Double](weights.size)
    for (n1 <- nodes.indices) {
      val f1 = nodes(n1).field
      val j1 = nodes(n1).feature - 1
      val v1 = nodes(n1).value
      if (f1 < m && j1 < n) {
        for (n2 <- n1 + 1 until nodes.length) {
          val f2 = nodes(n2).field
          val j2 = nodes(n2).feature
          val v2 = nodes(n2).value
          if (f2 < m && j2 < n) {
            // Factor of feature j1 in field f2, and of j2 in field f1.
            val w1Idx = j1 * align1 + f2 * align0
            val w2Idx = j2 * align1 + f1 * align0
            // Accumulator slots sit k entries after the factor (AdaGrad layout).
            val wg1Idx = w1Idx + $(k)
            val wg2Idx = w2Idx + $(k)
            val v = v1 * v2 * r
            val kappav = kappa * v
            for (d <- 0 until $(k)) {
              val w1 = weights(w1Idx + d)
              val w2 = weights(w2Idx + d)
              val wg1 = weights(wg1Idx + d)
              val wg2 = weights(wg2Idx + d)
              val g1 = lambda * w1 + kappav * w2
              val g2 = lambda * w2 + kappav * w1

              gradients(w1Idx + d) = -eta / math.sqrt(wg1) * g1
              gradients(w2Idx + d) = -eta / math.sqrt(wg2) * g2
              gradients(wg1Idx + d) = -gradients(wg1Idx + d) + g1 * g2
              gradients(wg2Idx + d) = -gradients(wg2Idx + d) + g1 * g2
            }
          }
        }
      }
    }
    BDV(gradients)
  }

  /**
    * Computes the raw FFM value for one instance: the sum over all feature
    * pairs of the dot product of their field-aware factors, scaled by the
    * pair's value product and the normalization factor r.
    */
  private def ffm(
                   nodes: Seq[Node],
                   weights: Array[Double],
                   m: Int,
                   n: Int,
                   r: Double,
                   align0: Int,
                   align1: Int): Double = {
    var t = 0.0
    for (n1 <- nodes.indices) {
      val f1 = nodes(n1).field
      val j1 = nodes(n1).feature
      val v1 = nodes(n1).value
      if (f1 < m && j1 < n) {
        for (n2 <- n1 + 1 until nodes.length) {
          val f2 = nodes(n2).field
          val j2 = nodes(n2).feature
          val v2 = nodes(n2).value
          if (f2 < m && j2 < n) {
            val w1Idx = j1 * align1 + f2 * align0
            val w2Idx = j2 * align1 + f1 * align0
            val v = v1 * v2 * r
            for (d <- 0 until $(k)) {
              val w1 = weights(w1Idx + d)
              val w2 = weights(w2Idx + d)
              t += w1 * w2 * v
            }
          }
        }
      }
    }
    t
  }

  /**
    * Returns true when the norm of the solution difference is below the
    * tolerance, scaled by the current solution's norm.
    */
  private def isConverged(
                           previousWeights: BDV[Double],
                           currentWeights: BDV[Double],
                           convergenceTol: Double): Boolean = {
    // BUGFIX: the comparison result was previously discarded and the method
    // always returned false, so maxIter was the only stopping criterion.
    val solutionVecDiff = norm(previousWeights - currentWeights)
    solutionVecDiff < convergenceTol * Math.max(norm(currentWeights), 1.0)
  }

  /** Logs all training params through the ML Instrumentation facility. */
  private[spark] def logParams(dataset: Dataset[_]): Unit = {
    logInfo("==================== Params ====================")
    val instr = Instrumentation.create(this, dataset)
    instr.logParams(labelCol, featuresCol, predictionCol, rawPredictionCol, probabilityCol,
      optimizer, normalization, maxIter, k, partitions, aggregationDepth, tol, subsamplingRate, stepSize, regParam)
  }

}

/**
  * Companion object for [[FFM]]: persistence support and the identifiers of
  * the supported optimizers.
  */
object FFM extends DefaultParamsReadable[FFM] {
  /** Plain stochastic gradient descent. */
  private[classification] val SGD: String = "sgd"
  /** AdaGrad-style adaptive gradient descent. */
  private[classification] val ADAG: String = "adag"
  /** All optimizer names accepted by the `optimizer` param. */
  private[classification] val OPTIMIZER: Array[String] = Array(SGD, ADAG)
}

/**
  * Model produced by [[FFM]].
  *
  * @param uid         unique identifier
  * @param numClasses  number of label classes (FFM.train always passes 2)
  * @param numFeatures number of distinct features (n)
  * @param numFields   number of distinct fields (m)
  * @param weights     flattened latent-factor weights; the stride doubles for
  *                    the non-SGD layout (see predictRaw)
  */
class FFMModel (
                 override val uid: String,
                 override val numClasses: Int,
                 override val numFeatures: Int,
                 val numFields: Int,
                 val weights: Vector)
  extends ProbabilisticClassificationModel[Seq[String], FFMModel]
    with FFMParams with MLWritable {

  // Maps raw scores to probabilities via the sigmoid.
  // NOTE(review): despite the "InPlace" name this returns a fresh vector and
  // does not mutate `rawPrediction`; callers that use the returned value are
  // unaffected — confirm no caller relies on in-place mutation.
  override def raw2probabilityInPlace(rawPrediction: Vector): Vector = {
    // sigmoid
    Vectors.dense(rawPrediction.toArray.map(p => 1 / (1 + math.exp(-p))))
  }

  /**
    * Computes the raw prediction (-ffm, +ffm) for one instance given as
    * "field:feature:value" strings, mirroring FFM's pairwise interaction sum.
    */
  override def predictRaw(features: Seq[String]): Vector = {
    val (m, n) = (numFields, numFeatures)
    val nodes = features.map(s => {
      val ss = s.split(":")
      (ss(0).toInt, ss(1).toInt, ss(2).toDouble)
    })
    // Normalization factor: 1 / (sum of squared values).
    // NOTE(review): divides by zero (yielding Infinity) when all values are
    // 0 — confirm inputs always contain a non-zero value.
    val r = if ($(normalization)) {
      val norm = nodes.map(n => math.pow(n._3, 2)).sum
      1.0 / norm
    } else {
      1.0
    }
    // Strides into the flat weight array; the non-SGD layout interleaves an
    // accumulator block after each factor, doubling the stride.
    val (align0, align1) = if($(optimizer).equals(FFM.SGD)) {
      ($(k), m * $(k))
    } else {
      ($(k) * 2, m * $(k) * 2)
    }
    var ffm = 0.0
    for (n1 <- nodes.indices) {
      // field
      val f1 = nodes(n1)._1
      // feature
      val j1 = nodes(n1)._2
      // value
      val v1 = nodes(n1)._3
      if (f1 < m && j1 < n) {
        for (n2 <- n1 + 1 until nodes.length) {
          val f2 = nodes(n2)._1
          val j2 = nodes(n2)._2
          val v2 = nodes(n2)._3
          if (f2 < m && j2 < n) {
            // Factor of feature j1 in field f2, and of j2 in field f1.
            val w1Idx = j1 * align1 + f2 * align0
            val w2Idx = j2 * align1 + f1 * align0
            val v = v1 * v2 * r
            for (d <- 0 until $(k)) {
              val w1 = weights(w1Idx + d)
              val w2 = weights(w2Idx + d)
              ffm += w1 * w2 * v
            }
          }
        }
      }
    }
    Vectors.dense(Array(-ffm, ffm))
  }

  override def write: MLWriter = new FFMModelWriter(this)

  // Copies the model, preserving params and the parent estimator.
  override def copy(extra: ParamMap): FFMModel = {
    val copied = new FFMModel(uid, numClasses, numFeatures, numFields, weights)
    copyValues(copied, extra).setParent(parent)
  }
}

/** Persistence support (MLWritable/MLReadable) for [[FFMModel]]. */
object FFMModel extends MLReadable[FFMModel] {

  /** Saves params as metadata plus a single-row parquet payload under `data/`. */
  private[FFMModel]
  class FFMModelWriter(instance: FFMModel) extends MLWriter {
    // Schema of the persisted model payload.
    private case class Data(numClasses: Int,
                            numFeatures: Int,
                            numFields: Int,
                            weights: Vector)

    override protected def saveImpl(path: String): Unit = {
      DefaultParamsWriter.saveMetadata(instance, path, sc)
      val payload = Data(instance.numClasses, instance.numFeatures, instance.numFields, instance.weights)
      val dataPath = new Path(path, "data").toString
      sparkSession.createDataFrame(Seq(payload)).repartition(1).write.parquet(dataPath)
    }
  }

  /** Restores an [[FFMModel]] from the layout written by [[FFMModelWriter]]. */
  private class FFMModelReader extends MLReader[FFMModel] {
    private val className = classOf[FFMModel].getName

    override def load(path: String): FFMModel = {
      val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
      val dataPath = new Path(path, "data").toString
      // The payload is a single row; column order matches the select below.
      val row = sparkSession.read.parquet(dataPath)
        .select("numClasses", "numFeatures", "numFields", "weights")
        .head()
      val model = new FFMModel(
        metadata.uid,
        row.getAs[Int](0),
        row.getAs[Int](1),
        row.getAs[Int](2),
        row.getAs[Vector](3))
      DefaultParamsReader.getAndSetParams(model, metadata)
      model
    }
  }

  override def read: MLReader[FFMModel] = new FFMModelReader

  override def load(path: String): FFMModel = super.load(path)
}