import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SaveMode, SparkSession}
import org.apache.spark.sql.types.{ByteType, DoubleType, IntegerType, LongType, StringType, StructField, StructType}
import org.apache.spark.graphx.{Edge, Graph, VertexId}

// Generates edges from the grid data (grid_id, passable flag) read from
// "hdfs://localhost:9000/tmp/jsongrid/grids". The resulting edge list is
// written to "hdfs://localhost:9000/tmp/jsongrid/edges".
object EdgesFromVertex {

  // Side length of the fixed test grid (512 x 512). Cell ids are encoded as
  // grid_id = x * GridSize + y, so (x, y) can be recovered by div/mod.
  private val GridSize = 512L

  /**
   * Reads grid cells (grid_id, passable flag "a") from HDFS, builds one directed
   * edge from every passable cell to each of its passable 8-neighbours, and
   * writes the edge list as JSON back to HDFS.
   *
   * Output schema: (srcId: Long, dstId: Long, attr: Double), where attr is the
   * traversal cost (constant 1.0).
   */
  def main(args: Array[String]): Unit = {
    println("Hello world!")

    val session = SparkSession.builder()
      .appName("ManualGridSize")
      .master("local[*]")
      .config("spark.driver.memory", "2g") // mitigate java.lang.OutOfMemoryError: GC overhead limit exceeded
      // When running inside IDEA: "System memory ... must be at least 471859200."
      // Raise heap via --driver-memory or spark.driver.memory; this override keeps local runs working.
      .config("spark.testing.memory", "471859200")
      .getOrCreate()
    session.sparkContext.setLogLevel("ERROR")

    try {
      val gridDF = session.read.json("hdfs://localhost:9000/tmp/jsongrid/grids")
      gridDF.show(1)

      // a = 1 marks a passable cell (rendered as a white square on the map).
      // The test data is a fixed 512*512 grid with grid_id = x * 512 + y.
      val rddWhite = gridDF.filter("a = 1").rdd.map { row =>
        val gridId = row.getAs[Long]("grid_id")
        (gridId / GridSize, gridId % GridSize)
      }
      // NOTE(review): collects every passable cell to the driver. Fine for a
      // 512*512 grid; revisit (e.g. broadcast join) for larger inputs.
      val arrayWhite = rddWhite.collect()
      // Set of passable (x, y) coordinates for O(1) neighbour membership tests.
      // (arrayWhite is already Array[(Long, Long)]; no element-wise copy needed.)
      val whiteXYSet = arrayWhite.toSet

      // Returns the passable cells among the 8 neighbours of (x, y).
      def gen8Neighbors(x: Long, y: Long): Seq[(Long, Long)] = {
        for {
          dx <- -1 to 1
          dy <- -1 to 1
          if dx != 0 || dy != 0
          nx = x + dx
          ny = y + dy
          if whiteXYSet.contains((nx, ny))
        } yield (nx, ny)
      }

      // One directed edge per passable neighbour pair; edge attribute is the
      // traversal cost (uniform 1.0).
      val edges: RDD[Edge[Double]] = session.sparkContext.parallelize(
        arrayWhite.flatMap { case (x, y) =>
          gen8Neighbors(x, y).map { case (nx, ny) =>
            Edge(x * GridSize + y, nx * GridSize + ny, 1.0)
          }
        }
      )

      // StructType describes the column layout of the edge DataFrame so the
      // RDD[Row] can be converted via createDataFrame.
      val edgesSchema = StructType(
        Array(
          StructField("srcId", LongType, nullable = true),
          StructField("dstId", LongType, nullable = true),
          StructField("attr", DoubleType, nullable = true)
        )
      )
      val edgesRows = edges.map { e => Row(e.srcId, e.dstId, e.attr) }
      val df = session.createDataFrame(edgesRows, edgesSchema)
      df.write.format("json").mode(SaveMode.Overwrite).save("hdfs://localhost:9000/tmp/jsongrid/edges")
      println("work finished ..... ")
    }
    finally {
      println("finally ..... ")
      session.stop()
    }

  }
}
