import org.apache.sedona.core.enums.GridType
import org.apache.sedona.core.spatialRDD.SpatialRDD
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import org.apache.sedona.spark.SedonaContext
import org.apache.sedona.sql.utils.Adapter
import org.apache.sedona.core.enums.IndexType
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.{expr, row_number}
import org.apache.spark.sql.types.{LongType, StringType, StructField, StructType}
import org.locationtech.jts.geom.{Coordinate, GeometryFactory}

// Running `sbt run` from the IDEA terminal fails after a while with an out-of-memory error:
// java.lang.OutOfMemoryError: GC overhead limit exceeded
// Try launching with:
// set JAVA_OPTS=-Xmx4G -Xms1G && sbt run
// If it still fails, run the command from cmd instead.
object Main {
  /**
   * Entry point: reads two polygon layers (zone boundary and land boundary) from
   * PostgreSQL, builds a coarse and a fine grid over the zone's envelope, then
   * derives the sea-only grid cells and writes them out as GeoParquet (see calcLands).
   */
  def main(args: Array[String]): Unit = {
    println("Hello world!")

    // NOTE(review): setting "spark.driver.memory" on the builder has no effect here —
    // in local mode the driver JVM is already running when getOrCreate() executes, so
    // the heap size must be supplied before JVM start (JAVA_OPTS / spark-submit), as
    // the header comment at the top of this file suggests.
    val session = SparkSession.builder()
      .appName("ManualGridSize")
      .master("local[*]")
      .config("spark.driver.memory", "2g")  // ineffective at this point — see NOTE above
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.kryo.registrator", "org.apache.sedona.core.serde.SedonaKryoRegistrator")
      .config("spark.scheduler.listenerbus.eventqueue.threads", "4")
      .getOrCreate()
    session.sparkContext.setLogLevel("ERROR")

    try {
      // Registers Sedona's spatial SQL functions (ST_*) on this session.
      val spark = SedonaContext.create(session)
      println("[********]Sedona initialized successfully![********]")
      // [1] Load the region to be gridded from PostgreSQL. Two layers are read:
      //     the zone boundary polygons and the land boundary polygons.
      // SECURITY(review): database credentials are hard-coded in source; move them
      // to configuration or environment variables.
      val jdbcUrl = "jdbc:postgresql://192.168.1.53:5432/postgres"
      val connectionProperties = new java.util.Properties()
      connectionProperties.put("user", "gis_read")
      connectionProperties.put("password", "isd5Wp3h2sqQ")
      connectionProperties.put("driver", "org.postgresql.Driver")
      val areaDF = spark.read
        .jdbc(jdbcUrl, "public.mg_zone", connectionProperties)
        .selectExpr("ST_GeomFromWKB(geom) AS geom")
      areaDF.cache()
      //areaDF.show(1) //TEST!
      val landDF = spark.read
        .jdbc(jdbcUrl, "public.mg_zone_ld", connectionProperties)
        .selectExpr("ST_GeomFromWKB(geom) AS geom")
      landDF.cache()
      //landDF.show(1) //TEST!

      // [2] Build two grid layers over the zone's envelope — one coarse (0.5 units)
      //     and one fine (0.1 units). They act as the "base maps" for later steps.
      val largeGrids = makeGrid(spark, areaDF, 0.5)
      println("[********]large grids[********]")
      largeGrids.show(5) //TEST!

      val smallGrids = makeGrid(spark, areaDF, 0.1)
      println("[********]small grids[********]")
      smallGrids.show(5) //TEST!

      // [3] Spatial operations: compute the grid cells that lie over sea (not land).
      calcLands(spark, landDF, largeGrids, smallGrids)

    }
    finally {
      session.stop()
    }
  }

  /**
   * Converts a DataFrame with a "geom" column into a spatially partitioned
   * SpatialRDD with an R-tree index built on each partition.
   *
   * Previously the indexed RDD was built and immediately discarded, which made the
   * whole method a no-op for callers; it is now returned so the index can actually
   * be used. Existing statement-position call sites remain source-compatible.
   *
   * @param data DataFrame containing a "geom" geometry column
   * @return the analyzed, KDB-tree partitioned, R-tree indexed SpatialRDD
   */
  def createIndexType(data: DataFrame): SpatialRDD[org.locationtech.jts.geom.Geometry] = {
    val rdd = new SpatialRDD[org.locationtech.jts.geom.Geometry]()
    rdd.rawSpatialRDD = Adapter.toSpatialRdd(data, "geom").rawSpatialRDD
    // Compute boundary statistics required before spatial partitioning.
    rdd.analyze()
    // Spatial partitioning: a KDB-tree keeps nearby geometries in the same partition.
    rdd.spatialPartitioning(GridType.KDBTREE)
    // Build an R-tree index on the spatially partitioned RDD (second arg = true
    // indexes the partitioned RDD rather than the raw one).
    rdd.buildIndex(IndexType.RTREE, true)
    rdd
  }

  /**
   * Derives the sea-side grid cells from the land polygons and the two grid layers,
   * then writes both results to local GeoParquet files.
   *
   * Pipeline:
   *  1. small grid: keep cells that intersect a buffered coastline band but do NOT
   *     intersect land itself;
   *  2. large grid: keep cells that either do not intersect land at all, or contain
   *     at least one small coastal cell from step 1;
   *  3. write both results to D:\temp\jl (hard-coded Windows paths — consider
   *     making these configurable).
   *
   * @param spark      session with Sedona SQL functions registered
   * @param lands      land polygons ("geom" column)
   * @param largeGrids coarse grid [geom, grid_id, i, j]
   * @param smallGrids fine grid [geom, grid_id, i, j]
   */
  def calcLands(spark: SparkSession, lands: DataFrame, largeGrids: DataFrame, smallGrids: DataFrame): Unit = {
    // NOTE(review): the spatially partitioned index prepared by this call is not
    // referenced by the SQL joins below (they go through the temp view) — confirm
    // whether this call is still needed.
    createIndexType(lands)
    lands.createOrReplaceTempView("lands")

    //[1] Small grid: select coastal cells, excluding cells that lie on land.
    val bufferDistance = 100 * 0.01667 // approx. degrees; 0.01667° ≈ 1 nautical mile, so ~100 nm
    smallGrids.createOrReplaceTempView("t_small_grids")
    // tmp : small cells that intersect land.
    // tmp2: small cells that intersect the buffered coastline band.
    // Result: cells in tmp2 that are NOT in tmp (near the coast but not on land).
    val seaSmallGridSql =
      s"""
         |WITH tmp AS (
         |    SELECT t1.grid_id
         |    FROM t_small_grids t1
         |    JOIN lands t2 ON ST_Intersects(t2.geom, t1.geom)
         |)
         |,tmp2 as (
         |SELECT distinct t1.grid_id
         |    FROM t_small_grids t1
         |    JOIN lands t2 ON ST_Intersects(ST_Buffer(t2.geom, ${bufferDistance}),t1.geom) --创建海岸线缓存区，
         |)
         |SELECT tt1.*
         |FROM t_small_grids tt1
         |join tmp2 tt3 on tt1.grid_id = tt3.grid_id  --保留在海岸线的网格，但是不在陆地的小网格
         |WHERE NOT EXISTS (
         |    SELECT 1
         |    FROM tmp tt2
         |    WHERE tt2.grid_id = tt1.grid_id
         |)
         |""".stripMargin
    val seaSmallGridDF = spark.sql(seaSmallGridSql).select("geom","grid_id","i","j")
    println(s"min grid size : ${seaSmallGridDF.count()}")

    // NOTE(review): "coach" is presumably a typo for "coast"; the view name is only
    // used inside this method, but renaming it would change a runtime identifier.
    seaSmallGridDF.createOrReplaceTempView("t_small_grids_near_coach")
    // [2] Large grid: exclude cells that lie on land.
    largeGrids.createOrReplaceTempView("t_large_grids")
    // tmp : large cells that intersect land.
    // tmp2: large cells that contain one of the small coastal cells from step [1].
    // Keep large cells satisfying either condition:
    // (1) they do not intersect land, or (2) they contain a coastal-buffer small cell.
    val seaLargeGridSql =
      """
        |WITH tmp AS (
        |    SELECT t1.*
        |    FROM t_large_grids t1
        |    JOIN lands t2 ON ST_Intersects(t2.geom, t1.geom)
        |),
        | tmp2 as (
        |    SELECT t1.*
        |    FROM t_large_grids t1
        |    JOIN t_small_grids_near_coach t2 ON ST_Contains(t1.geom, t2.geom)
        |)
        |SELECT tt1.*
        |FROM t_large_grids tt1
        |WHERE NOT EXISTS (
        |    SELECT 1
        |    FROM tmp tt2
        |    WHERE tt2.grid_id = tt1.grid_id
        |) or EXISTS ( SELECT 1
        |    FROM tmp2 tt2
        |    WHERE tt2.grid_id = tt1.grid_id)
        |""".stripMargin
    val seaLargeGridDF = spark.sql(seaLargeGridSql).select("geom","grid_id","i","j")
    println(s"big grid size : ${seaLargeGridDF.count()}")

    // [3] Write both results to local GeoParquet files.
    seaSmallGridDF.createOrReplaceTempView("small_grid")
    seaSmallGridDF.write.format("geoparquet").mode(SaveMode.Overwrite).save("D:\\temp\\jl\\small_geoparquet")
    seaLargeGridDF.createOrReplaceTempView("large_grid")
    seaLargeGridDF.write.format("geoparquet").mode(SaveMode.Overwrite).save("D:\\temp\\jl\\large_geoparquet")
  }

  /**
   * Cuts the region contained in `data` into a square grid covering its envelope.
   *
   * Fixes over the previous version: the envelope aggregate is computed once in a
   * subquery instead of four times, and an empty input now fails fast with a clear
   * message instead of silently producing a degenerate (all-zero) envelope.
   *
   * @param spark    spark session with Sedona functions registered
   * @param data     DataFrame containing a 'geom' column
   * @param gridSize grid cell size (same units as the geometry coordinates), > 0
   * @return DataFrame of grid cells [i, j, geom, grid_id]
   */
  def makeGrid(spark: SparkSession, data: DataFrame, gridSize: Double): DataFrame = {
    require(gridSize > 0, s"gridSize must be positive, got $gridSize")
    data.createOrReplaceTempView("envelope")
    // Aggregate the overall envelope once, then extract its four bounds.
    val sql =
      s"""
         |SELECT ST_XMin(env) AS minX,
         |       ST_XMax(env) AS maxX,
         |       ST_YMin(env) AS minY,
         |       ST_YMax(env) AS maxY
         |FROM (SELECT ST_Envelope_Aggr(geom) AS env FROM envelope)
         |""".stripMargin
    val row = spark.sql(sql).first()
    // ST_Envelope_Aggr over an empty/all-null input yields NULL bounds; fail fast
    // rather than letting getAs[Double] turn them into a meaningless envelope.
    require(!row.isNullAt(row.fieldIndex("minX")),
      "makeGrid: input DataFrame contains no geometries — cannot compute an envelope")
    val envelopeMinX = row.getAs[Double]("minX")
    val envelopeMinY = row.getAs[Double]("minY")
    val envelopeMaxX = row.getAs[Double]("maxX")
    val envelopeMaxY = row.getAs[Double]("maxY")

    genBaseGridWithSize(spark,
      envelopeMinX, envelopeMaxX, envelopeMinY, envelopeMaxY,
      gridSize, gridSize)
  }

  /**
   * Cuts the rectangle defined by the four envelope coordinates into a regular grid
   * of cells with the given width/height.
   *
   * Fix over the previous version: grid_id was assigned with row_number() over an
   * unpartitioned Window ordered by (i, j), which forces Spark to collapse the whole
   * dataset into a single partition just to number the rows. Since the cells are
   * generated row-major, that row number is exactly `i * numCols + j + 1`, so it is
   * now computed directly — identical values, no single-partition shuffle.
   *
   * @param spark        spark session with Sedona functions registered
   * @param envelopeMinX envelope minimum X
   * @param envelopeMaxX envelope maximum X
   * @param envelopeMinY envelope minimum Y
   * @param envelopeMaxY envelope maximum Y
   * @param gridWidth    width of each grid cell
   * @param gridHeight   height of each grid cell
   * @return DataFrame [i, j, geom, grid_id] — i/j are row/column indices,
   *         grid_id is the 1-based row-major cell number
   */
  def genBaseGridWithSize(spark: SparkSession,
                          envelopeMinX: Double, envelopeMaxX: Double,
                          envelopeMinY: Double, envelopeMaxY: Double,
                          gridWidth: Double, gridHeight: Double): DataFrame = {
    // Number of columns/rows needed to cover the envelope (the last cell may overshoot).
    val numCols = math.ceil((envelopeMaxX - envelopeMinX) / gridWidth).toInt
    val numRows = math.ceil((envelopeMaxY - envelopeMinY) / gridHeight).toInt
    // JTS GeometryFactory builds each cell polygon; the polygon is serialized to WKT
    // text so the rows can be shipped to executors as plain strings.
    val geometryFactory = new GeometryFactory()
    // Generate (row index, column index, WKT polygon) for every cell, row-major order.
    val gridPolygons = for {
      i <- 0 until numRows
      j <- 0 until numCols
    } yield {
      val minX = envelopeMinX + j * gridWidth
      val minY = envelopeMinY + i * gridHeight
      val maxX = minX + gridWidth
      val maxY = minY + gridHeight
      val coordinates = Array(
        new Coordinate(minX, minY),
        new Coordinate(maxX, minY),
        new Coordinate(maxX, maxY),
        new Coordinate(minX, maxY),
        new Coordinate(minX, minY) // close the ring
      )
      (i.toLong, j.toLong, geometryFactory.createPolygon(coordinates).toString)
    }

    // Build an RDD of Rows, then a DataFrame with an explicit schema.
    val gridPolygonsRDD = spark.sparkContext.parallelize(gridPolygons).map(t => Row(t._1, t._2, t._3))
    val envelopeSchema = StructType(
      Array(
        StructField("i", LongType, nullable = true),
        StructField("j", LongType, nullable = true),
        StructField("geom", StringType, nullable = true)
      )
    )
    // grid_id = 1-based row-major position; equivalent to the former
    // row_number().over(Window.orderBy("i","j")) but computed without a shuffle.
    spark.createDataFrame(gridPolygonsRDD, envelopeSchema)
      .withColumn("grid_id", expr(s"CAST(i * $numCols + j + 1 AS LONG)"))
      .withColumn("geom", expr("ST_GeomFromText(geom)"))
  }

}


