package cn.wzk.doitedu.datayi.profile

import org.apache.spark.sql.{Dataset, SparkSession}

import scala.collection.mutable

/**
 * KNN (k = 3) classification demo on Spark SQL.
 *
 * Reads a labeled sample set and an unlabeled test set (CSV lines of the form
 * `v1,v2,...,vn,<label-or-image-name>`), computes the squared Euclidean
 * distance between every (test, sample) pair, keeps the 3 nearest samples per
 * test record, and prints the majority-vote label for each test image.
 */
object KnnDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName(this.getClass.getName)
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._

    // Load the sample (training) data set.
    val samp: Dataset[String] = spark.read.textFile("user_profile/data/knn/samples_vec")

    // Vectorize: the last CSV field is the label, the rest are feature values.
    val samp_vec = samp.map(e => {
      val arr = e.split(",")
      (arr(arr.length - 1), arr.slice(0, arr.length - 1).map(_.toDouble))
    }).toDF("label", "vec")

    // Load the test data set.
    val test = spark.read.textFile("user_profile/data/knn/test_vec/test.csv")

    // Vectorize: the last CSV field is the image name, the rest are feature values.
    val test_vec = test.map(e => {
      val arr = e.split(",")
      (arr(arr.length - 1), arr.slice(0, arr.length - 1).map(_.toDouble))
    }).toDF("image_name", "vec")

    samp_vec.createTempView("samp")
    test_vec.createTempView("test")

    // UDF: squared Euclidean distance between two feature vectors.
    // sqrt is deliberately omitted — it is monotone, so ranking by squared
    // distance selects exactly the same nearest neighbors.
    val dist = (arr1: mutable.WrappedArray[Double], arr2: mutable.WrappedArray[Double]) => {
      arr1.zip(arr2).map(tp => Math.pow(tp._1 - tp._2, 2)).sum
    }

    spark.udf.register("eudist", dist)

    // For every test record, compute its distance to every sample record.
    spark.sql(
      """
        |
        |select
        |  test.image_name as image_name,
        |  eudist(samp.vec,test.vec) as dist,
        |  samp.label
        |from
        |  samp cross join test
        |""".stripMargin).createTempView("tmp")

    // Keep the 3 nearest samples for each TEST record.
    // FIX: the original partitioned by `label`, which ranked samples within
    // each class globally instead of finding each test image's nearest
    // neighbors; KNN requires partitioning by the test key (image_name).
    spark.sql(
      """
        |select
        | image_name,
        | label
        |from(
          |select
          |  image_name,
          |  label,
          |  row_number() over(partition by image_name order by dist) as rn
          |from tmp
        |) o
        |where rn <= 3
        |""".stripMargin).createTempView("temp2")

    // Majority vote among the 3 nearest neighbors: a label appearing on at
    // least 2 of them wins.
    // NOTE(review): if all 3 neighbors carry distinct labels, no row is
    // emitted for that image — acceptable for a demo, but a real classifier
    // should break ties (e.g. pick the label of the single closest neighbor).
    spark.sql(
      """
        |select
        |  image_name,
        |  label
        |from temp2
        |group by image_name,label having count(1) >= 2
        |""".stripMargin).show()

    spark.close()

  }
}
