package com.lagoue.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @author: yehw
 * @date: 2020/10/24 00:09
 * @description: 使用鸢尾花数据集实现KNN算法 (KNN classification on the iris dataset)
 */
/**
 * Immutable record holding the four numeric iris features
 * (sepal length/width, petal length/width).
 *
 * NOTE(review): this class appears unused in this file and the name
 * looks like a typo for "Iris" — kept as-is for source compatibility.
 */
case class Lris(
  f1: Float,
  f2: Float,
  f3: Float,
  f4: Float
)

object homework4 {

  /**
   * KNN classification of iris records using plain Spark RDDs.
   *
   * Reads a labelled training set from data/Iris.csv and an unlabelled
   * query set from data/Iris2.csv, computes all pairwise Euclidean
   * distances via a cartesian product, and classifies every query record
   * by majority vote over its k nearest labelled neighbours.
   * Results are printed to stdout.
   */
  def main(args: Array[String]): Unit = {
    val k = 9 // number of nearest neighbours used for the vote
    val d = 4 // number of feature dimensions per record
    // Create the SparkSession / SparkContext (local mode, all cores).
    val spark = SparkSession.builder().appName("homework4").master("local[*]").getOrCreate()
    val sc = spark.sparkContext
    sc.setLogLevel("WARN")
    // Broadcast the shared constants so every executor reads one copy.
    val broadcastK = sc.broadcast(k)
    val broadcastD = sc.broadcast(d)
    // Labelled training set S: Array(id, "f1;f2;f3;f4", label).
    // NOTE(review): assumes Iris.csv has no header row and exactly 6
    // comma-separated columns — confirm against the data file.
    val S: RDD[Array[String]] = sc.textFile("data/Iris.csv").map(line => {
      val arr = line.split(",")
      Array(arr(0), arr(1) + ";" + arr(2) + ";" + arr(3) + ";" + arr(4), arr(5))
    })
    // Unlabelled query set R: Array(id, "f1;f2;f3;f4").
    val R: RDD[Array[String]] = sc.textFile("data/Iris2.csv").map(line => {
      val arr = line.split(",")
      Array(arr(0), arr(1) + ";" + arr(2) + ";" + arr(3) + ";" + arr(4))
    })

    /**
     * Euclidean distance between two ';'-separated feature vectors.
     * Returns Double.NaN when either vector does not have exactly `d`
     * components (malformed input row).
     */
    def calculateDistance(rAsString: String, sAsString: String, d: Int): Double = {
      val r = rAsString.split(";").map(_.toDouble)
      val s = sAsString.split(";").map(_.toDouble)
      if (r.length != d || s.length != d) Double.NaN
      else {
        // `.zip` instead of the deprecated `(r, s).zipped` (removed in
        // Scala 3); the redundant `.take(d)` is dropped — both arrays are
        // already known to have length d.
        math.sqrt(r.zip(s).map { case (ri, si) => math.pow(ri - si, 2) }.sum)
      }
    }

    // Pair every training record with every query record.
    val cart = S cartesian R
    // For each (training, query) pair emit:
    //   (queryId, (distance, (trainingId, trainingLabel)))
    // Note: the left element comes from S (labelled) and the right from R.
    val knnMapped = cart.map { case (trainingRecord, queryRecord) =>
      val trainingID = trainingRecord(0)
      val trainingLabel = trainingRecord(2)
      val trainingFeatures = trainingRecord(1)
      val queryID = queryRecord(0)
      val queryFeatures = queryRecord(1)
      val distance = calculateDistance(trainingFeatures, queryFeatures, broadcastD.value)
      (queryID, (distance, (trainingID, trainingLabel)))
    }
    // Collect all candidate neighbours per query record.
    val knnGrouped = knnMapped.groupByKey()
    knnGrouped.foreach(println) // debug output of the raw groups
    // Keep the k nearest valid neighbours and classify by majority vote.
    val knnOutput = knnGrouped.mapValues { candidates =>
      val nearestK = candidates.toList
        .filterNot(_._1.isNaN) // NaN = malformed row; NaN would corrupt the sort order
        .sortBy(_._1)
        .take(broadcastK.value)
      // Count votes per label among the k nearest neighbours.
      val votes = nearestK
        .groupBy(_._2._2)
        .map { case (label, hits) => (label, hits.size) }
      // Guard against an empty vote map (all candidate rows malformed).
      if (votes.isEmpty) "unclassified" else votes.maxBy(_._2)._1
    }
    println("结果：")
    knnOutput.foreach(println)
    sc.stop()
  }
}
