package cn.doitedu.sparkml.knn

import cn.doitedu.commons.utils.SparkUtil
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}


/**
  * @author: 余辉
  * @blog: https://blog.csdn.net/silentwolfyh
  * @create: 2019/10/21
  * @description: Handwritten-digit recognition via a SQL-expressed k-nearest-neighbours
  *               classifier (k = 5) over squared Euclidean distances between pixel vectors.
  **/
object HandWritingRecognize {

  def main(args: Array[String]): Unit = {

    // 1. Build the Spark session; silence Spark's noisy INFO logging first.
    Logger.getLogger("org").setLevel(Level.WARN)
    val spark: SparkSession = SparkUtil.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._

    val testData = "rec_system/outputdata/knn/testknn"
    val traiData = "rec_system/outputdata/knn/traiknn"

    // Test set: one row per image file -> (filename, true label, feature vector).
    val testdata: DataFrame = loadTestData(spark, testData).toDF("filename", "label", "features")
    testdata.createTempView("test")
    testdata.printSchema()

    // Training set: (label, feature vector).
    val traidata = loadTraiData(spark, traiData).toDF("label", "features")
    traidata.createTempView("trai")

    /**
      * k-NN expressed in SQL:
      * 1. Cross-join test x train and compute the pairwise distance          (t1)
      * 2. Rank training rows per test file by ascending distance             (t2)
      * 3. Keep the 5 nearest neighbours; count votes per (file, labels)      (t3)
      * 4. Rank the vote counts per file in descending order                  (t4)
      * 5. Keep rank 1: the majority label among the 5 nearest neighbours.
      */

    // Squared Euclidean distance. It is monotonic in the true Euclidean
    // distance, so neighbour ranking is identical and the sqrt is skipped.
    val sqd = (v1: linalg.Vector, v2: linalg.Vector) => {
      Vectors.sqdist(v1, v2)
    }

    spark.udf.register("sqds", sqd)

    val result: DataFrame = spark.sql(
      """
        |select
        |   filename,
        |   alabel,
        |   blabel
        |from (
        |   select
        |      filename,
        |      alabel,
        |      blabel,
        |      row_number() over(partition by filename order by cn desc) n2
        |   from (
        |      select
        |         filename,
        |         alabel,
        |         blabel,
        |         count(1) as cn
        |      from (
        |         select
        |            filename,
        |            alabel,
        |            blabel,
        |            row_number() over(partition by filename order by dist) n1
        |         from (
        |            select
        |               a.filename,
        |               a.label as alabel,
        |               b.label as blabel,
        |               sqds(a.features, b.features) as dist
        |            from test a cross join trai b
        |         ) t1
        |      ) t2
        |      where n1 <= 5
        |      group by filename, alabel, blabel
        |   ) t3
        |) t4
        |where n2 = 1
        |""".stripMargin).toDF("filename", "alabel", "blabel")
    // NOTE: the original used `n1 < 5`, which keeps only the 4 nearest
    // neighbours despite the documented k = 5; fixed to `n1 <= 5`.

    result.show(10, false)

    // Pair (true label, predicted label) for every test file.
    // MulticlassMetrics expects (prediction, label), but accuracy is
    // symmetric in the two components, so the result is unaffected.
    val labelAndPrediction: RDD[(Double, Double)] = result.rdd.map({
      case Row(filename: String, alabel: String, blabel: String) => (alabel.toDouble, blabel.toDouble)
    })

    println(new MulticlassMetrics(labelAndPrediction).accuracy)

  }

  /**
    * Loads the test set.
    *
    * Each input line looks like: filename \u0001 label \u0002 csv-of-doubles
    * (e.g. "9_9.txt" SOH "9" STX "0.0,0.0,...").
    *
    * @param spark active session
    * @param path  input path (text files)
    * @return RDD of (filename, label, dense feature vector)
    */
  def loadTestData(spark: SparkSession, path: String): RDD[(String, String, linalg.Vector)] = {
    val file: Dataset[String] = spark.read.textFile(path)
    file.rdd.map(row => {
      val splits = row.split("\001")
      val fileName = splits(0)
      val label = splits(1).split("\002")(0)
      val arrStr = splits(1).split("\002")(1)
      val arrDouble: Array[Double] = arrStr.split(",").map(s => s.toDouble)
      val vec = Vectors.dense(arrDouble)
      (fileName, label, vec)
    })
  }

  /**
    * Loads the training set.
    *
    * Each input line looks like: label \u0001 csv-of-doubles.
    *
    * @param spark active session
    * @param path  input path (text files)
    * @return RDD of (label, dense feature vector)
    */
  def loadTraiData(spark: SparkSession, path: String): RDD[(String, linalg.Vector)] = {
    val file: Dataset[String] = spark.read.textFile(path)
    file.rdd.map(row => {
      val splits = row.split("\001")
      val label = splits(0)
      val arrStr = splits(1)
      val arrDouble: Array[Double] = arrStr.split(",").map(s => s.toDouble)
      val vec = Vectors.dense(arrDouble)
      (label, vec)
    })
  }

}
