import java.util.Date

import scala.math._
import BalanceQuery.queryFromAlluxio
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable.ArrayBuffer

object distanceCompute {

  /**
    * Entry point.
    *
    * Example:
    * ./spark-submit --master spark://yun0:7077 --class distanceCompute \
    *   /data/rucer/testfile/testsscala.jar \
    *   hdfs://yun0:9000/hw/data_test.txt 3 hdfs://yun0:9000/hw/result_test
    *
    * args(0) — input file path (one point per line: "<id> <x> <y>")
    * args(1) — distance threshold
    * args(2) — output directory path
    */
  def main(args: Array[String]): Unit = {
    println("Arguments = " + args.mkString(" "))
    // Fail fast with a usage message instead of an opaque IndexOutOfBounds.
    require(args.length >= 3, "usage: distanceCompute <infile> <distance> <outdir>")

    val infile = args(0)              // path of input file
    val distance = args(1).toDouble   // distance threshold
    val outfile = args(2)             // output directory

    filterPoints(infile, distance, outfile)
  }

  /**
    * For every point, collect the ids of all points at (or after) its own
    * position whose Euclidean distance to it is strictly less than
    * `distance`, and save the (id, matching-ids) pairs to `outfilePath`.
    *
    * Input lines have the form "<id> <x> <y>". The inner scan starts at
    * index `id - 1`, so the code assumes ids are 1-based and match the
    * file order; each unordered pair is then examined only once
    * (point i is compared against points i..n).
    *
    * @param infilePath  path of the input text file
    * @param distance    strict upper bound on the Euclidean distance
    * @param outfilePath output directory for saveAsTextFile
    */
  def filterPoints(infilePath: String, distance: Double, outfilePath: String): Unit = {
    val conf = new SparkConf().setAppName("distanceCompute")
    val sc = new SparkContext(conf)
    try {
      val splitRdd = sc.textFile(infilePath).map(_.split(" "))

      // Materialize the data set once on the driver (the original called
      // collect() twice) and broadcast it so each executor gets a single
      // read-only copy instead of re-serializing it with every task.
      val splitdata = splitRdd.collect()
      val total = splitdata.length
      val points = sc.broadcast(splitdata)

      val finalData = splitRdd.map { line =>
        val id = line(0).toInt
        // Parse this point's coordinates once, outside the inner loop.
        val x = line(1).toDouble
        val y = line(2).toDouble

        val neighbours = ArrayBuffer[Int]()
        // Only scan points from this point's own index onward (ids 1-based).
        for (i <- (id - 1) until total) {
          val other = points.value(i)
          val dist = sqrt(pow(x - other(1).toDouble, 2) + pow(y - other(2).toDouble, 2))
          if (dist < distance) {
            neighbours += (i + 1) // store the 1-based id of the close point
          }
        }
        (line(0), neighbours)
      }

      finalData.saveAsTextFile(outfilePath)
    } finally {
      // The original leaked the SparkContext; always release it.
      sc.stop()
    }
  }
}
