package com.example
import org.apache.sedona.core.enums.{GridType, IndexType}
import org.apache.sedona.core.spatialOperator.{JoinQuery, SpatialPredicate}
import org.apache.sedona.spark.SedonaContext
import org.apache.sedona.sql.utils.Adapter
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.graphx.Edge
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.{expr, when}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import org.locationtech.jts.geom.{Coordinate, Geometry, GeometryFactory}

import java.io.FileOutputStream

object MakeGrid {
  private val dx = 6000//math.round((140.0-100.0)/0.01).toInt // 4000
  private val dy = 3000//math.round((23.634-(-6.366))/0.01).toInt // 3000
  /**
   * Entry point. Builds a regular lon/lat grid over a bounding box, spatially
   * joins it against obstacle geometries read from a PostGIS database (land
   * area polygons, wreck and obstruction points), marks each grid cell as
   * blocked (d=0) or free (d=1), and saves the result as a flat byte matrix
   * via saveByteMatrix.
   */
  def main(args: Array[String]): Unit = {
    println("Hello world!")

    val session = SparkSession.builder()
      .appName("ManualGridSize")
//      .master("local[*]")
//      .config("spark.driver.maxResultSize", "2g") // org.apache.spark.SparkException: Job aborted due to stage failure: Total size of serialized results of 1 tasks (1917.5 MiB) is bigger than spark.driver.maxResultSize (1024.0 MiB)
//      .config("spark.driver.memory", "2g")  // try to fix java.lang.OutOfMemoryError: GC overhead limit exceeded
//      .config("spark.testing.memory", "471859200")  // When running inside IDEA: Exception in thread "main" java.lang.IllegalArgumentException: System memory 259522560 must be at least 471859200. Please increase heap size using the --driver-memory option or spark.driver.memory in Spark configuration.
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.kryo.registrator", "org.apache.sedona.core.serde.SedonaKryoRegistrator")
      .config("spark.scheduler.listenerbus.eventqueue.threads", "4")
      .getOrCreate()
    session.sparkContext.setLogLevel("ERROR")

    try {
      val spark = SedonaContext.create(session)
      import spark.implicits._
      println("[********]Sedona initialized successfully![********]")
      // [1] The region to be partitioned is stored in a postgres database; read it first.
      //     There are two kinds of areas: the region boundary polygons and the land boundary polygons.
      val jdbcUrl = "jdbc:postgresql://192.168.20.53:5432/seamap"
      val connectionProperties = new java.util.Properties()
      // SECURITY NOTE(review): database credentials are hard-coded in source;
      // move them to configuration or a secrets store.
      connectionProperties.put("user", "gis_read")
      connectionProperties.put("password", "isd5Wp3h2sqQ")
      connectionProperties.put("driver", "org.postgresql.Driver")

      // Bounding box (WGS84 lon/lat) used to filter obstacle geometries.
      val minX = 100.0
      val minY = -6.366
      val maxX = 160.0
      val maxY = 23.634
      val coordinates = Array(
        new Coordinate(minX, minY),
        new Coordinate(maxX, minY),
        new Coordinate(maxX, maxY),
        new Coordinate(minX, maxY),
        new Coordinate(minX, minY) // close the ring
      )

      val geometryFactory = new GeometryFactory()
      // Use a .where clause so only geometries intersecting the bounding box are fetched.
      val wkt = geometryFactory.createPolygon(coordinates).toString
      val landDF = spark.read
        .jdbc(jdbcUrl, "datagis.t_lndare_r", connectionProperties)
        .selectExpr("ST_GeomFromWKB(geom) AS geom")
        .where("ST_Intersects(geom, ST_GeomFromText('"+wkt+"', 4326))")
      //landDF.cache()
//      println("[****] t_lndare_t table [****]")
//      landDF.show(1)

//      val landDF2 = spark.read
//        .jdbc(jdbcUrl, "datagis.t_resare_r", connectionProperties)
//        .selectExpr("ST_GeomFromWKB(geom) AS geom")
//        .where("ST_Intersects(geom, ST_GeomFromText('"+wkt+"', 4326))")
      //landDF2.cache()
//      println("[****] t_resare_r table [****]")
//      landDF2.show(1)

      val landDF3 = spark.read
        .jdbc(jdbcUrl, "datagis.t_wrecks_p", connectionProperties)
        .selectExpr("ST_GeomFromWKB(geom) AS geom")
        .where("ST_Intersects(geom, ST_GeomFromText('"+wkt+"', 4326))")

      //landDF3.cache()

      val landDF4 = spark.read
        .jdbc(jdbcUrl, "datagis.t_obstrn_p", connectionProperties)
        .selectExpr("ST_GeomFromWKB(geom) AS geom")
        .where("ST_Intersects(geom, ST_GeomFromText('"+wkt+"', 4326))")
      //landDF4.cache()

      val landDF5 = spark.read
        .jdbc(jdbcUrl, "datagis.t_lndare_p", connectionProperties)
        .selectExpr("ST_GeomFromWKB(geom) AS geom")
        .where("ST_Intersects(geom, ST_GeomFromText('"+wkt+"', 4326))")
      //landDF5.cache()

      // NOTE(review): genBaseGrid uses a 0.005° latitude step, so dy=3000 rows
      // cover latitudes -6.366..8.634, while the obstacle bounding box extends
      // to 23.634 — confirm the grid is intentionally smaller than the bbox.
      val gridDF = genBaseGrid(spark, 100.000, -6.366, dx, dy)
//      println("[****] basic grid table [****]")
//      gridDF.show(1)
      gridDF.createOrReplaceTempView("t_basic_grids")
      println("*************************************************************************************")
      println(s"[****]count of basic grids is ${gridDF.count()}[****]")
      //println(s"[****]with partitions: ${gridDF.rdd.getNumPartitions}[****]")
      println("*************************************************************************************")

//      landDF.createOrReplaceTempView("t_lndare")
//      landDF2.createOrReplaceTempView("t_resare")
//      landDF3.createOrReplaceTempView("t_wrecks_p")
//      landDF4.createOrReplaceTempView("t_obstrn_p")
//      landDF5.createOrReplaceTempView("t_lndare_p")

      // Merge all obstacle layers into a single deduplicated DataFrame.
//      val dfObstacles = landDF.union(landDF2).union(landDF3).union(landDF4).union(landDF5).distinct()
      val dfObstacles = landDF.union(landDF3).union(landDF4).union(landDF5).distinct()
//      dfObstacles.show(1)
//      dfObstacles.createOrReplaceTempView("obstacles")
//      println("[****]count of obstacles rdd is " + dfObstacles.count() + "[****]")

      // Convert the DataFrames to Sedona SpatialRDDs for the spatial join.
      val gdfGrids = Adapter.toSpatialRdd(gridDF, "geom")
      val gdfObstacles = Adapter.toSpatialRdd(dfObstacles, "geom")

      // 2. Analyze the data extents (required step before partitioning).
      gdfGrids.analyze()
      gdfObstacles.analyze()

      // Key step: spatial partitioning.
      gdfGrids.spatialPartitioning(GridType.KDBTREE)  // choose the partitioning strategy
      gdfObstacles.spatialPartitioning(gdfGrids.getPartitioner)  // reuse the same partitioner on both sides

      // Build spatial indexes (optional; recommended for large datasets).
      gdfGrids.buildIndex(IndexType.QUADTREE, false)
      gdfObstacles.buildIndex(IndexType.QUADTREE, false)

      // Execute the spatial join.
      //(boolean useIndex, SpatialPredicate spatialPredicate, IndexType polygonIndexType, JoinBuildSide joinBuildSide)
      val joinParams = new JoinQuery.JoinParams(true, SpatialPredicate.INTERSECTS)
      val resultPairRDD = JoinQuery.SpatialJoinQueryFlat(gdfObstacles, gdfGrids, joinParams)
      //println(resultPairRDD.take(10))
      println("count of SpatialJoinQueryFlat rows = " + resultPairRDD.count())

//      val dTest = Adapter.toDf(gdfGrids, spark)
//      dTest.printSchema()
//      dTest.show(2)

      // Convert the join result back to a DataFrame.
      val rowRDD: JavaRDD[Row] = resultPairRDD.map {
        case (geom: Geometry, _) =>
          // Recover the original row fields carried in the geometry's user data (tab-separated).
          //println(geom.getUserData.asInstanceOf[String])
          val data = geom.getUserData.asInstanceOf[String].split("\t")
//        StructField("x", LongType, nullable = true),
//        StructField("y", LongType, nullable = true),
//        StructField("grid_id", LongType, nullable = true),
//        StructField("geom", StringType, nullable = true),
//        StructField("a", ByteType, nullable = true)  // =1 means passable
          Row(data(0).toLong, data(1).toLong, data(2).toLong, geom, 0.toByte) // (x, y, grid_id, geom, passable-flag)
      }
      val resultDF = spark.createDataFrame(rowRDD, gridDF.schema)
//      resultDF.show(10)
//      println("count of resultDF = " + resultDF.count())
      // A cell may intersect several obstacles; keep one row per grid_id.
      val distinctByIdDF = resultDF.dropDuplicates("grid_id")
//      println("count of distinctByIdDF = " + distinctByIdDF.count())

      // Left-join the full grid against the obstacle-hit cells:
      // d=0 when the cell intersected an obstacle, d=1 otherwise.
      val fullGrid = gridDF.alias("a")
        .join(distinctByIdDF.alias("b"), $"a.grid_id" === $"b.grid_id", "left")
        .withColumn("d", when($"b.grid_id".isNotNull, 0).otherwise(1))
        .select($"a.grid_id", $"d")

      // Show the result.
      fullGrid.show(5)
      println("count of resultDF = " + fullGrid.count())

      // Timing comparison of output strategies:
      // (1) Convert to a byte array (1 byte per cell): Total time: 240 s (0:04:00.0)
      // (2) Save as JSON
      //   with union: Total time: 136 s (0:02:16.0)
      //   without union, saving only blackGrids: Total time: 79 s (0:01:19.0)
      //   without union, skipping blackGrids, saving only whiteGrids: Total time: 81 s (0:01:21.0)
      //   without union, skipping blackGrids & whiteGrids, saving only gridsOnLand: Total time: 77 s (0:01:17.0)
      // (3) No file output, only printing the gridsOnLand count, without union and without blackGrids & whiteGrids: Total time: 80 s (0:01:20.0)
      saveByteMatrix(fullGrid);
      //saveGraph(spark, C)

      println("[***]task finished.[****]")
    }
    finally {
      session.stop()
    }
  }

  /**
   * Persists the (grid_id, d) DataFrame as a flat binary matrix: one byte per
   * cell, ordered by grid_id (the row-major cell index), where each byte is
   * the cell's `d` flag as produced by the caller's join (0 = intersects an
   * obstacle, 1 = free).
   *
   * NOTE(review): collect() pulls the whole grid to the driver; driver memory
   * and spark.driver.maxResultSize must be sized accordingly (see the
   * commented config hints in main()).
   */
  private def saveByteMatrix(gridDF: DataFrame): Unit = {
    println("collect grid-access and save as byte matrix.")
    val byteArray = gridDF
      .orderBy("grid_id") // ordering by grid_id fixes the matrix layout
      .collect()
      .map(row => row.getInt(1).toByte) // column 1 is `d`; one byte per cell
    // Write the grid matrix to a local binary file.
    val path = "/tmp/grids.data" // "D:\\temp\\jl\\grids.data"
    val fos = new FileOutputStream(path)
    try fos.write(byteArray)
    finally fos.close() // fix: stream was previously leaked if write() threw
  }

  /**
   * Builds the base grid as a DataFrame of numCols * numRows rectangular cells.
   *
   * Columns:
   *   x       - column index (0 until numCols)
   *   y       - row index (0 until numRows)
   *   grid_id - row-major cell id: x + y * numCols
   *   geom    - cell polygon (WKT parsed into a geometry by ST_GeomFromText)
   *   a       - passability flag; =1 means the cell is passable
   *
   * lonStep/latStep generalize the previously hard-coded cell size and default
   * to the original values (0.01 deg lon x 0.005 deg lat), so existing callers
   * are unaffected.
   */
  private def genBaseGrid(spark: SparkSession, lonMin: Double, latMin: Double,
                          numCols: Int, numRows: Int,
                          lonStep: Double = 0.01, latStep: Double = 0.005): DataFrame = {
    val geometryFactory = new GeometryFactory()
    val gridPolygons = for {
      x <- 0 until numCols
      y <- 0 until numRows
    } yield {
      val cellMinX = lonMin + x * lonStep
      val cellMinY = latMin + y * latStep
      val cellMaxX = cellMinX + lonStep
      val cellMaxY = cellMinY + latStep
      val coordinates = Array(
        new Coordinate(cellMinX, cellMinY),
        new Coordinate(cellMaxX, cellMinY),
        new Coordinate(cellMaxX, cellMaxY),
        new Coordinate(cellMinX, cellMaxY),
        new Coordinate(cellMinX, cellMinY) // close the ring
      )
      // Why 1.toByte instead of a plain 1: a java.lang.Integer fails schema
      // encoding with "java.lang.Integer is not a valid external type for
      // schema of tinyint".
      (x.toLong, y.toLong, (x + y * numCols).toLong, geometryFactory.createPolygon(coordinates).toString, 1.toByte)
    }
    // Build an RDD from the local collection, then a DataFrame matching gridsSchema.
    val gridPolygonsRDD = spark.sparkContext.parallelize(gridPolygons).map(row => Row(row._1, row._2, row._3, row._4, row._5))
    // StructType describes the DataFrame's column structure and data types.
    val gridsSchema = StructType(
      Array(
        StructField("x", LongType, nullable = true),
        StructField("y", LongType, nullable = true),
        StructField("grid_id", LongType, nullable = true),
        StructField("geom", StringType, nullable = true),
        StructField("a", ByteType, nullable = true)  // =1 means passable
      )
    )
    spark.createDataFrame(gridPolygonsRDD, gridsSchema)
      .withColumn("geom", expr("ST_GeomFromText(geom)")) // WKT string -> Sedona geometry
  }
}

