package cn.doitedu.ml.demo

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{DataTypes, StructType}

import scala.collection.mutable

/**
 * Hand-rolled KNN (k-nearest-neighbours) demo on Spark SQL:
 * classifies each test record by the majority label of its
 * 3 closest training samples (Euclidean distance).
 */
object KnnDemo {

  /** Number of nearest neighbours in the vote; keep it odd so a 2-class majority vote cannot tie. */
  private val K = 3

  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder().appName("KNN算法手撕").master("local").getOrCreate()

    // Load the labelled training set: four numeric features plus a binary (0.0/1.0) label.
    val schema = new StructType()
      .add("height", DataTypes.DoubleType)
      .add("weight", DataTypes.DoubleType)
      .add("xueya", DataTypes.DoubleType)
      .add("xuezhi", DataTypes.DoubleType)
      .add("label", DataTypes.DoubleType)
    val sample = spark.read.schema(schema).csv("user_portrait/data/knn/input/sample.csv")

    // Load the unlabelled test set; its feature columns carry a "t_" prefix so they
    // remain distinguishable from the training columns after the cross join below.
    val schema2 = new StructType()
      .add("id", DataTypes.DoubleType)
      .add("t_height", DataTypes.DoubleType)
      .add("t_weight", DataTypes.DoubleType)
      .add("t_xueya", DataTypes.DoubleType)
      .add("t_xuezhi", DataTypes.DoubleType)
    val test = spark.read.schema(schema2).csv("user_portrait/data/knn/input/test.csv")

    // Cartesian product: pair every test row with every training row so all
    // pairwise distances can be computed in one pass.
    val joined = test.crossJoin(sample)

    import spark.implicits._
    import org.apache.spark.sql.functions._
    // Pack the two feature groups into arrays so a single UDF can consume them.
    val vecDF = joined.select(
      'id,
      array('t_height, 't_weight, 't_xueya, 't_xuezhi) as "vec1",
      array('height, 'weight, 'xueya, 'xuezhi) as "vec2",
      'label)

    // Euclidean distance between two equal-length feature vectors.
    // FIX: the original returned the *squared* distance (sqrt was missing). Since
    // sqrt is monotone the neighbour ranking — and hence the KNN prediction — is
    // unchanged, but the value shown in distDF now matches the function's name.
    val eu_dist = udf((vec1: mutable.WrappedArray[Double], vec2: mutable.WrappedArray[Double]) => {
      val squaredSum = vec1.zip(vec2).map { case (a, b) => Math.pow(a - b, 2) }.sum
      Math.sqrt(squaredSum)
    })

    val distDF = vecDF.select('id, eu_dist('vec1, 'vec2) as "eudist", 'label)

    distDF.show(100, false)

    // For every test sample keep its K nearest training samples
    // (row_number over the per-id distance ordering).
    distDF.createTempView("dist")
    val top3 = spark.sql(
      s"""
         |select id, label
         |from (
         |  select id, label,
         |         row_number() over(partition by id order by eudist) as rn
         |  from dist
         |) o
         |where rn <= $K
         |""".stripMargin)
    top3.show(100, false)

    /**
     * Example of top3 for one test id:
     * +---+-----+
     * |id |label|
     * +---+-----+
     * |1.0|0.0  |
     * |1.0|0.0  |
     * |1.0|0.0  |
     */

    // Majority vote over the K neighbours' binary labels. Comparing the label sum
    // against count(*)/2 instead of the hard-coded literal 2 keeps the query
    // correct for any (odd) K; for K = 3 it is identical to the original rule.
    top3.createTempView("top3")
    val res = spark.sql(
      """
        |select id,
        |       if(sum(label) > count(*) / 2, 1.0, 0.0) as predict
        |from top3
        |group by id
        |""".stripMargin)
    res.show(100, false)

    spark.close()
  }
}
