package main.scala.KNNalgorithm



import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable


/**
 * A node of a KD-tree.
 *
 * Wiring rule applied at construction time: each non-null child is linked
 * back to this node as its `parent`, and across to the opposite child as its
 * `brotherNode`. A node whose parent never wired it (or whose sibling was
 * null) keeps the default null links, which marks it as a node with no
 * parent/brother chain to search through.
 *
 * @param value data content (the point stored at this node)
 * @param dim   the dimension at which the data sequence is split at this node
 * @param left  left subtree (null when absent)
 * @param right right subtree (null when absent)
 */
case class TreeNode(value: Seq[Double],
                    dim: Int,
                    var left: TreeNode,
                    var right: TreeNode) {

  var parent: TreeNode = _
  var brotherNode: TreeNode = _

  // Link an existing child back to this node and across to its sibling.
  private def wire(child: TreeNode, sibling: TreeNode): Unit =
    if (child != null) {
      child.parent = this
      child.brotherNode = sibling
    }

  wire(left, right)
  wire(right, left)
}





object KDtree {

  /**
   * Entry point: builds a KD-tree from points read from a CSV file and prints
   * the 3 nearest neighbours of the tree's root point.
   *
   * Input format: each line is `d1,d2,d3,...` where every `di` is a number and
   * every line has the same number of fields.
   *
   * Drawback (unchanged from the original design): all points are collected to
   * the driver, so tree construction and the k-NN query run on a single
   * machine; Spark is only used to read and parse the file.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)

    val spark = SparkSession.builder()
      .appName(this.getClass.getName)
      .master("local[*]")
      .getOrCreate()

    // Ensure the local SparkContext is released even if parsing or the
    // search fails (the original never called spark.stop()).
    try {
      val value: RDD[Seq[Double]] = spark.sparkContext
        .textFile("src/main/test/Douban-dataset/data/edges2.csv")
        .map(x => x.split(",").toSeq)
        .map(x => x.map(y => y.toDouble))
      println("reading over!!!")

      // Single-machine step: pull every point onto the driver.
      val nodes: Array[Seq[Double]] = value.collect()
      // Fail fast with a clear message instead of the opaque error that
      // nodes.head would throw on an empty input file.
      require(nodes.nonEmpty, "input file contains no points")
      val shape = nodes.head.size // vector dimension, taken from the first point

      val kdt = new KdT()
      val treeNode: TreeNode = kdt.creatKdTree(nodes, 0, shape)

      println("============k nearest neighbor==============================")
      kdt
        .knn(treeNode, treeNode.value, 3)
        .map(x => (x._1, x._2.value))
        .foreach(println)
    } finally {
      spark.stop()
    }
  }
}


