package cn.doitedu.ml.knn

import org.apache.spark.sql.SparkSession

import scala.collection.mutable

/**
 * Handwritten digit recognition — KNN implemented from scratch on Spark SQL.
 *
 * For each image to classify, compute the distance between its feature vector and
 * every labelled sample vector, keep the n nearest samples, then take a majority
 * vote: the label that occurs most often among those n neighbours is taken as the
 * predicted class of the image.
 */
object DigitsRecognize {

  def main(args: Array[String]): Unit = {

    // Number of nearest neighbours used in the vote.
    // A label wins an image when it holds a strict majority, i.e. more than k/2
    // of the k neighbours (for k = 3 that is count > 1, same as the original code).
    val k = 3

    val spark = SparkSession.builder()
      .appName("手写数字识别")
      .master("local")
      .getOrCreate()
    import spark.implicits._

    // Load the labelled training samples.
    // Each line is "label,pix1,pix2,..." — first field is the digit label,
    // the remaining fields are the pixel feature vector (kept as strings here,
    // parsed to Int inside the distance UDF).
    val sample = spark.read.textFile("userprofile/data/knndemo/sample_vector")
    sample.map(line => {
      val arr = line.split(",")
      (arr(0), arr.tail)
    }).toDF("label", "features").createTempView("sample_vec")

    // Load the images to classify.
    // Each line is "imgId,pix1,pix2,..." — same layout, but the first field
    // identifies the image instead of carrying a label.
    val test = spark.read.textFile("userprofile/data/knndemo/test_vector")
    test.map(line => {
      val arr = line.split(",")
      (arr(0), arr.tail)
    }).toDF("img", "features").createTempView("test_vec")

    // Cross join: pair every image to classify with every training sample,
    // so the distance can be computed for each (sample, image) combination.
    spark.sql(
      """
        |
        |select
        |s.label,
        |s.features as s_features,
        |t.img,
        |t.features as t_features
        |
        |from sample_vec s cross join test_vec t
        |
        |""".stripMargin).createTempView("joined")


    // Euclidean distance between two pixel vectors.
    //
    // FIX: the original returned the *squared* distance — Math.sqrt was missing.
    // sqrt is strictly monotonic, so the neighbour ranking and the final KNN
    // prediction are unchanged; only the reported `dist` values become true
    // Euclidean distances.
    //
    // NOTE(review): zip silently truncates to the shorter vector — this assumes
    // every feature vector has the same dimension; confirm against the data files.
    val eudist = (vec1: mutable.WrappedArray[String], vec2: mutable.WrappedArray[String]) => {
      val v1 = vec1.map(_.toInt)
      val v2 = vec2.map(_.toInt)
      Math.sqrt(v1.zip(v2).map { case (a, b) => Math.pow(a - b, 2) }.sum)
    }
    spark.udf.register("eudist", eudist)

    // Distance of every training sample to every image.
    spark.sql(
      """
        |
        |select
        |label,
        |img,
        |eudist(s_features,t_features) as dist
        |from joined
        |
        |""".stripMargin).createTempView("dist_view")

    /**
     * Example output (values below are from a pre-sqrt run, i.e. squared
     * distances; with the sqrt fix the magnitudes change but the ordering —
     * and therefore everything downstream — is identical):
     * +-----+---+-----+
     * |label|img|dist |
     * +-----+---+-----+
     * |7    |a  |140.0|
     * |7    |b  |291.0|
     * |7    |a  |71.0 |
     * |7    |b  |262.0|
     * |6    |a  |242.0|
     * |6    |b  |241.0|
     * +-----+---+-----+
     */

    // Keep only the k nearest samples per image: rank sample distances within
    // each image partition and retain ranks 1..k.
    spark.sql(
      s"""
        |
        |select
        |label,
        |img
        |
        |from
        |(
        |select
        |  label,
        |  img,
        |  row_number()  over(partition by img order by dist)  as  rn
        |from dist_view
        |) o
        |where rn<=$k
        |
        |""".stripMargin).createTempView("tmp")

    /**
     * Example: the k nearest neighbours of each image, one row per vote.
     * +-----+---+
     * |label|img|
     * +-----+---+
     * |6    |b  |
     * |6    |b  |
     * |7    |b  |
     * |7    |a  |
     * |7    |a  |
     * |7    |a  |
     * +-----+---+
     */

    // Majority vote: a label is the prediction for an image when it accounts
    // for more than half of that image's k neighbours.
    spark.sql(
      s"""
        |
        |select
        |
        |img,
        |label
        |
        |from tmp
        |group by label,img
        |having count(1)>${k / 2}
        |
        |
        |
        |""".stripMargin).show(100, false)


    spark.close()

  }
}
