package org.apache.spark.ml.classification

import breeze.linalg.{norm, DenseVector => BDV}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.mllib.classification.FFMWithAdag
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types._
import org.scalatest.FunSuite

/**
  * Test suite for the FFM (field-aware factorization machine) classifier:
  * exercises the ml-style `FFM` estimator, the mllib `FFMWithAdag` trainer,
  * and a small CSV-to-TSV data-preparation step.
  *
  * @author songhaicheng
  * @since 2019/2/26
  */
class FFMSuite extends FunSuite {

  // Shared local SparkSession for all tests; lazy so it only starts when a
  // test actually runs.
  lazy val spark = SparkSession
    .builder
    .master("local")
    .appName("Spark Test")
    .getOrCreate()

  /**
    * Parses one libffm-style line "label,field:feature:value,..." into a
    * (label, features) pair, mapping any positive raw label to 1.0 and the
    * rest to -1.0.
    */
  private def parseFfmLine(line: String): (Double, Array[(Int, Int, Double)]) = {
    val rd = line.split(",")
    val label = if (rd.take(1)(0).toInt > 0) 1.0 else -1.0
    val features = rd.drop(1).map { s =>
      val ss = s.split(":")
      (ss(0).toInt, ss(1).toInt, ss(2).toDouble)
    }
    (label, features)
  }

  test("ffm") {
    // Input lines: "<label> <feature> <feature> ...", space-separated.
    val txt = spark.read.text("E:/projects/train_data_sample.txt").rdd.map(row => {
      val rd = row.getString(0).split(" ")
      Row.fromSeq(Seq(rd.take(1)(0).toDouble, rd.drop(1)))
    })
    val st = StructType(Array(
      StructField("label", DoubleType),
      StructField("features", DataTypes.createArrayType(StringType))))
    val data = spark.createDataFrame(txt, st)

    val ffm = new FFM()
      .setK(4)
      .setLabelCol("label")
      .setFeaturesCol("features")
      .setMiniBatch(1.0)
      .setMaxIter(20)
      .setOptimizer("adag")
      .setStepSize(0.1)
      .setRegParam(0.002)
      .setNormalization(true)
      .setConvergenceTol(0.1)
    val res = ffm.fit(data).transform(data)

    // BinaryClassificationMetrics expects (prediction/score, label) pairs.
    // The previous code passed (label, prediction), silently inverting the
    // ROC/PR computation.
    val scores: RDD[(Double, Double)] = res.rdd.map(x => {
      (x.getAs[Double]("prediction"), x.getAs[Double]("label"))
    })

    res.show()
    val metrics = new BinaryClassificationMetrics(scores)
    val auROC = metrics.areaUnderROC
    val auPRC = metrics.areaUnderPR
    // Accuracy is symmetric in (prediction, label), so the order fix above
    // does not change it.
    val accuracy = scores.filter(x => x._1 == x._2).count().toDouble / scores.count()
    println(s"accuracy = $accuracy, Area under ROC = $auROC, Area under precision-recall curve = $auPRC")
    val cnt = scores.count()
    // Count positive labels distributedly instead of collecting the whole
    // RDD to the driver (scores are now (prediction, label), so the label
    // is _._2).
    val pCnt = scores.filter(_._2 == 1.0).count()
    println(pCnt + " / " + cnt)
  }

  test("ffm1") {
    // NOTE(review): train and test currently read the SAME file — presumably
    // a placeholder until a real hold-out split exists; verify before relying
    // on the reported metrics.
    val train = spark.read.text("E:/projects/ffm_train_data.txt").rdd
      .map(row => parseFfmLine(row.getString(0)))
      .repartition(5)
    val test = spark.read.text("E:/projects/ffm_train_data.txt").rdd
      .map(row => parseFfmLine(row.getString(0)))

    // ffm_train_data.txt: 39 fields, 59792 features.
    val m = FFMWithAdag.train(train, 39, 59792, (false, false, 10), 20,
      0.1, 0.002, true, true, "adagrad", 2)

    // Fix: evaluate on the held-out `test` RDD. The previous code scored
    // `train` and left `test` unused, over-estimating model quality.
    // NOTE(review): BinaryClassificationMetrics conventionally expects labels
    // in {0, 1}; here labels are {-1, 1} — confirm the metric values are
    // meaningful for this encoding.
    val scores: RDD[(Double, Double)] = test.map(x => {
      val p = m._1.predict(x._2)
      val ret = if (p >= 0.5) 1.0 else -1.0
      (ret, x._1)
    })

    val metrics = new BinaryClassificationMetrics(scores)
    val auROC = metrics.areaUnderROC
    val auPRC = metrics.areaUnderPR
    val accuracy = scores.filter(x => x._1 == x._2).count().toDouble / scores.count()
    println(s"accuracy = $accuracy, Area under ROC = $auROC, Area under precision-recall curve = $auPRC")

    val cnt = scores.count()
    // Count positive predictions without collecting the RDD to the driver.
    val pCnt = scores.filter(_._1 == 1.0).count()
    println(pCnt + " / " + cnt)
  }

  test("tt") {
    // Casts tag0..tag36 to Int and target to Double, then dumps the frame as
    // a single tab-separated text file.
    val raw = spark.read.option("header", "true").csv("E:/projects/train_data.csv").distinct()

    // Same column set as the old hand-written "tag0,tag1,...,tag36" string,
    // generated instead of typed out.
    val tagCols = (0 to 36).map("tag" + _)

    // foldLeft replaces the previous var + for-loop mutation.
    val withInts = tagCols.foldLeft(raw)((df, c) => df.withColumn(c, df(c).cast(IntegerType)))
    val data = withInts.withColumn("target", withInts.col("target").cast(DoubleType))

    data.rdd.map(_.mkString("\t")).repartition(1).saveAsTextFile("E:/projects/train_data.txt")
  }

}
