package cn.doitedu.ml.examples

import cn.doitedu.commons.utils.SparkUtil
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.types.{DataTypes, StructType}

import scala.collection.mutable

/**
 * @Title: KnnDemo.scala
 * @Package cn.doitedu.ml.examples
 * @Description: A k-nearest-neighbours (KNN) classification demo built on Spark SQL.
 * @Author hunter@doitedu.cn
 * @date 2020/8/16 11:06
 */
object KnnDemo {

  /** Number of nearest neighbours to vote over. */
  private val K = 5

  /**
   * KNN demo: for every test vector, find the K nearest sample vectors by
   * Euclidean distance and predict the majority label among them.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkUtil.getSparkSession("knn算法示例")

    // Schema of the labeled sample (training) set: one label + 5 numeric features.
    val schema1 = new StructType()
      .add("label", DataTypes.DoubleType)
      .add("f1", DataTypes.DoubleType)
      .add("f2", DataTypes.DoubleType)
      .add("f3", DataTypes.DoubleType)
      .add("f4", DataTypes.DoubleType)
      .add("f5", DataTypes.DoubleType)

    // Schema of the unlabeled test set: a row id + 5 numeric features.
    val schema2 = new StructType()
      .add("id", DataTypes.DoubleType)
      .add("t1", DataTypes.DoubleType)
      .add("t2", DataTypes.DoubleType)
      .add("t3", DataTypes.DoubleType)
      .add("t4", DataTypes.DoubleType)
      .add("t5", DataTypes.DoubleType)

    // Load the sample (training) data set.
    // NOTE: forward slashes are portable across OSes (Hadoop accepts them on Windows too).
    val sample = spark.read.option("header", "true").schema(schema1).csv("portrait/testdata/knn/sample/sample.txt")

    // Load the test data set.
    val test = spark.read.option("header", "true").schema(schema2).csv("portrait/testdata/knn/test/test.txt")

    // Pair every test vector with every sample vector (cartesian product).
    val joined = sample.crossJoin(test)
    //joined.show(100, false)

    /**
     * +-----+---+---+---+---+---+---+---+---+---+---+---+
     * |label|f1 |f2 |f3 |f4 |f5 |id |t1 |t2 |t3 |t4 |t5 |
     * +-----+---+---+---+---+---+---+---+---+---+---+---+
     * |0    |10 |20 |30 |40 |30 |1  |11 |21 |31 |44 |32 |
     * |0    |10 |20 |30 |40 |30 |2  |14 |26 |32 |39 |30 |
     * |0    |10 |20 |30 |40 |30 |3  |32 |14 |21 |42 |32 |
     * |0    |10 |20 |30 |40 |30 |4  |34 |12 |22 |42 |34 |
     *
     */

    import spark.implicits._
    import org.apache.spark.sql.functions._
    // Euclidean distance UDF.
    // BUGFIX: the original omitted Math.sqrt, yielding the *squared* distance.
    // The ranking is identical (sqrt is monotonic) but the shown values were mislabeled.
    val eudi = udf((vec1: mutable.WrappedArray[Double], vec2: mutable.WrappedArray[Double]) => {
      Math.sqrt(vec1.zip(vec2).map(tp => Math.pow(tp._1 - tp._2, 2)).sum)
    })

    // Distance between each test row (id) and each sample row (label).
    val eudi_df = joined.select('label, 'id, eudi(array('f1, 'f2, 'f3, 'f4, 'f5), array('t1, 't2, 't3, 't4, 't5)) as "eudi")
    eudi_df.show(100, false)

    // Keep the K nearest samples per test id (smallest distance first).
    val wd = Window.partitionBy('id).orderBy('eudi)
    val nearest = eudi_df.select('id, 'label, row_number() over (wd) as "rn").where(s"rn<=$K")

    nearest.show(100, false)

    /**
     * +---+-----+---+
     * |id |label|rn |
     * +---+-----+---+
     * |4.0|1.0  |1  |
     * |4.0|1.0  |2  |
     * |4.0|2.0  |3  |        ==>   4.0   1.0  2  ==>  4.0  1.0
     * |4.0|0.0  |4  |        ==>   4.0   0.0  2
     * |4.0|0.0  |5  |         ==>  4.0   2.0  1
     */

    // Majority vote: for each test id, pick the label occurring most often
    // among its K nearest neighbours (ties broken arbitrarily by row_number).
    // createOrReplaceTempView avoids an AnalysisException if "nn" already exists.
    nearest.createOrReplaceTempView("nn")
    val res = spark.sql(
      """
        |
        |select
        |id,label
        |from
        |(
        |select
        |id,label,row_number() over(partition by id order by cnt desc) as rn
        |from
        |(
        |select
        |id,label,count(1) as cnt
        |from nn
        |group by id,label
        |) o1
        |) o2
        |where rn=1
        |
        |
        |""".stripMargin)

    res.show(100, false)

    spark.close()
  }

}
