package com.example
import org.apache.spark.{HashPartitioner, Partitioner}
import org.apache.spark.sql.SparkSession
import io.circe.syntax._
import java.io._
import scala.util.Using

object SparkAStar {
  private object JsonWriterCirce {
    /** Serializes the (partition id, optional route) pairs to pretty-printed
      * JSON and writes the result to `filePath`.
      *
      * Uses `Using.resource` (not plain `Using(...)`, whose `Try` result was
      * previously discarded) so the writer is always closed and any I/O or
      * encoding failure propagates to the caller instead of being silently
      * swallowed.
      *
      * @param data     per-submap results: (submap id, Some(path points) or None)
      * @param filePath destination file; overwritten if it exists
      */
    def saveToJson(data: Array[(Int, Option[List[(Int, Int)]])], filePath: String): Unit = {
      // Convert to JSON via circe's auto-derived encoders.
      val json = data.asJson

      // Write to file; spaces2 = pretty-print with 2-space indentation.
      Using.resource(new PrintWriter(new File(filePath))) { writer =>
        writer.write(json.spaces2)
      }
    }
  }

  /** Entry point: loads ten serialized sub-grids, runs A* path-finding on each
    * partition in parallel, and writes the collected routes to a JSON file.
    * The SparkSession is always stopped in `finally`.
    */
  def main(args: Array[String]): Unit = {
    println("hello!")
    val session = SparkSession.builder()
      .appName("ManualGridSize")
      .master("local[*]")
      .config("spark.driver.memory", "2g")  // workaround for java.lang.OutOfMemoryError: GC overhead limit exceeded
      // Needed when running inside IDEA, which otherwise fails with:
      // "System memory 259522560 must be at least 471859200. Please increase heap size
      //  using the --driver-memory option or spark.driver.memory in Spark configuration."
      .config("spark.testing.memory", "471859200")
      //.config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      //.config("spark.kryo.registrator", "org.apache.sedona.core.serde.SedonaKryoRegistrator")
      //.config("spark.scheduler.listenerbus.eventqueue.threads", "4")
      .getOrCreate()
    session.sparkContext.setLogLevel("ERROR")

    try {
      // Ten pre-serialized sub-grid files, keyed by submap id 0..9.
      // Generated instead of hand-written to avoid copy/paste drift.
      val inputs = (0 to 9).toList.map(i => (i, s"D:\\temp\\jl\\serializeBinary$i.data"))
      // One partition per input file so each submap is processed independently.
      val rdd = session.sparkContext
        .parallelize(inputs)
        .partitionBy(new HashPartitioner(inputs.length))

//      // Debug: partition count and per-partition element counts.
//      println("rdd partitions count:" + rdd.getNumPartitions)
//      rdd.mapPartitionsWithIndex((idx, iter) => Iterator((idx, iter.size))).collect().foreach(r => println("my_partition " + r))
//      println("for each partition: ")
//      rdd.foreachPartition( it => {
//        var lx = List[(Int, String)]()
//        it.foreach( i => lx = lx :+ i)
//        println(" --- end of a partition --- " + lx)
//      })

      // RDD[(Int submap id, Option[List[(Int x, Int y)]] path points)].
      // Each task deserializes its grid, computes neighbor links, then runs
      // A* between the fixed start (110, 338) and goal (484, 372) cells.
      val routes = rdd
        .map { case (id, path) => (id, QTreeSerializer.deserializeCustom(path)) }
        .map { case (id, tree) =>
          QTreeFromGrid.computeAllNeighbors(tree)
          (id, AStar.findPath(tree, 110, 338, 484, 372))
        }

//      // Debug: result RDD partition layout and per-partition contents.
//      println("routes partitions count:" + routes.getNumPartitions)
//      routes.mapPartitionsWithIndex((idx, iter) => Iterator((idx, iter.size))).collect().foreach(r => println(r))
//      routes.foreachPartition( it => {
//        var lx = List[(Int, Option[List[(Int, Int)]])]()
//        it.foreach( i => lx = lx :+ i)
//        println(" --- end of a partition --- " + lx)
//      })

      // Abort if any submap failed to find a path (None result). Structured as
      // if/else rather than an early `return`, which is non-idiomatic Scala.
      val failed = routes.filter { case (_, route) => route.isEmpty }.take(1)
      if (failed.nonEmpty) {
        println("存在寻找路径失败的子地图，结束工作！")
      } else {
//        // Option 1: convert to DataFrame and merge partitions. [Writes a
//        // directory, which is awkward to hand back to a client for follow-up.]
//        val df = session.createDataFrame(routes)
//        df.printSchema()
//        df.coalesce(1).write.json("D:\\temp\\jl\\spark_routes.json")

        // Option 2-1: collect on the driver, sort by submap id, and serialize
        // with the JSON library ourselves.
        val lines = routes.collect().sortBy(_._1)
        JsonWriterCirce.saveToJson(lines, "D:\\temp\\jl\\spark_routes2.json")

//        // Option 2-2: plain Scala text output instead of JSON. [Hard to parse.]
//        import java.io.PrintWriter
//        new PrintWriter("D:\\temp\\jl\\scala_ori_routes.txt") {
//          write(lines.mkString("[", ",\n", "]"))
//          close()
//        }
      }
    }
    finally {
      session.stop()
    }
  }

}

