package com.fwmagic.spark.ml.knn

import com.fwmagic.spark.util.SparkUtils
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.types.{DataTypes, StructType}
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable

/**
 * KNN (k-nearest-neighbours) classification demo: scores unlabelled test
 * points against a labelled sample set using a Euclidean-distance
 * similarity and a majority vote over the k closest samples.
 */
object KnnDemo {
  def main(args: Array[String]): Unit = {
    // Keep Spark's internal loggers at INFO (raise to WARN to reduce console noise).
    Logger.getLogger("org").setLevel(Level.INFO)

    val spark: SparkSession = SparkUtils.getSparkSession(this.getClass.getSimpleName)

    // Number of nearest neighbours that vote on each test point's class.
    // With binary 0/1 labels, class 1 wins when it gets strictly more than
    // k / 2 (integer division) of the k votes — for k = 5 that is sum > 2.
    val k = 5

    // Schema of the labelled training samples: class label + five numeric features.
    val sampleSchema: StructType = new StructType()
      .add("label", DataTypes.DoubleType)
      .add("f1", DataTypes.DoubleType)
      .add("f2", DataTypes.DoubleType)
      .add("f3", DataTypes.DoubleType)
      .add("f4", DataTypes.DoubleType)
      .add("f5", DataTypes.DoubleType)

    // Load the labelled sample data.
    // NOTE(review): "smaple.csv" looks like a typo for "sample.csv" — confirm the
    // actual file name on disk before changing this path.
    val sample: DataFrame = spark.read
      .schema(sampleSchema)
      .option("header", true)
      .csv("data/knn/smaple.csv")

    // Register as the "sample" table for SQL access.
    sample.createOrReplaceTempView("sample")

    // Schema of the unlabelled test points: an id + the same five features.
    val testSchema: StructType = new StructType()
      .add("id", DataTypes.DoubleType)
      .add("f1", DataTypes.DoubleType)
      .add("f2", DataTypes.DoubleType)
      .add("f3", DataTypes.DoubleType)
      .add("f4", DataTypes.DoubleType)
      .add("f5", DataTypes.DoubleType)

    // Load the test points to classify.
    val test: DataFrame = spark.read
      .schema(testSchema)
      .option("header", true)
      .csv("data/knn/test.csv")

    // Register as the "test" table for SQL access.
    test.createOrReplaceTempView("test")

    // Step 1: for every (test point, training sample) pair compute a similarity
    // derived from the Euclidean distance (see `eudi` below). The registered UDF
    // name now matches the Scala function name (it was inconsistently "edui").
    spark.udf.register("eudi", eudi)

    val sims: DataFrame = spark.sql(
      """
        |select
        |a.id, b.label,
        |eudi(array(a.f1,a.f2,a.f3,a.f4,a.f5), array(b.f1,b.f2,b.f3,b.f4,b.f5)) as eudi
        |from test a cross join sample b
        |""".stripMargin)

    sims.createOrReplaceTempView("sims")

    // Step 2: for each test point keep only the k most similar neighbours
    // (similarity descending, i.e. distance ascending).
    val neighbours: DataFrame = spark.sql(
      s"""
        |select id, label
        |from
        |(select id, label,
        |row_number() over(partition by id order by eudi desc) rank
        |from sims) a where rank <= $k
        |""".stripMargin)

    neighbours.createOrReplaceTempView("neighbours")

    /**
     * Example of the per-neighbour similarities that feed the vote:
     * |id |label|eudi               |
     * +---+-----+-------------------+
     * |1.0|0.0  |0.2402530733520421 |
     * |1.0|1.0  |0.2240092377397959 |
     * |1.0|0.0  |0.21712927295533244|
     * |1.0|1.0  |0.18660549686337075|
     * |1.0|0.0  |0.1827439976315568 |
     * |4.0|1.0  |0.21712927295533244|
     * |4.0|0.0  |0.1757340838011157 |
     * |4.0|1.0  |0.1639607805437114 |
     * |4.0|1.0  |0.15438708879488486|
     * |4.0|1.0  |0.14438708879488486|
     */
    // Step 3: majority vote. Because the labels are 0/1, sum(label) counts the
    // votes for class 1; strictly more than k/2 of them classifies the point as 1.
    val res: DataFrame = spark.sql(
      s"""
        |select
        |id, if(sum(label) > ${k / 2}, 1, 0) predict
        |from neighbours group by id
        |""".stripMargin)

    res.show(100, false)

    spark.close()
  }

  /**
   * Euclidean-distance similarity between two feature vectors.
   *
   * Computes `1 / (sqrt(sum((a_i - b_i)^2)) + 1)`: the reciprocal of the
   * Euclidean distance, with +1 added to the denominator so it can never be
   * zero. The result is in (0, 1]; identical vectors score exactly 1.0.
   *
   * Note: `zip` truncates to the shorter input, so vectors of different
   * lengths are silently compared only over their common prefix.
   */
  val eudi = (f1: mutable.WrappedArray[Double], f2: mutable.WrappedArray[Double]) => {
    // Sum of squared per-component differences, e.g. [(10,20),(20,30)] -> 100 + 100.
    val squaredDistance: Double = f1.zip(f2).map { case (a, b) => math.pow(a - b, 2) }.sum
    // math.sqrt is clearer than Math.pow(d2, 0.5) and delegates to the same intrinsic.
    1 / (math.sqrt(squaredDistance) + 1)
  }

}
