package com.example

import org.apache.sedona.core.enums.GridType
import org.apache.sedona.spark.SedonaContext
import org.apache.sedona.sql.utils.Adapter
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.functions.{col, expr, when}
import org.apache.spark.sql.types.{LongType, StructField, StructType}

import java.io.FileOutputStream
import scala.annotation.unused


object TestSpatialRdd {

  /**
   * Entry point: builds a local SparkSession configured for Sedona (Kryo
   * serialization with Sedona's registrator), initializes the Sedona context,
   * runs the grid-export demo, and always stops the session on exit.
   */
  def main(args: Array[String]): Unit = {
    println("Hello world!")
    val session = SparkSession.builder()
      .appName("ManualGridSize")
      .master("local[*]")
      // Raised because collecting large results failed with:
      // org.apache.spark.SparkException: Job aborted due to stage failure: Total size of
      // serialized results of 1 tasks (1917.5 MiB) is bigger than spark.driver.maxResultSize (1024.0 MiB)
      .config("spark.driver.maxResultSize", "2g")
      // Mitigates: java.lang.OutOfMemoryError: GC overhead limit exceeded
      .config("spark.driver.memory", "2g")
      // When running inside IDEA: "Exception in thread "main" java.lang.IllegalArgumentException:
      // System memory 259522560 must be at least 471859200. Please increase heap size using the
      // --driver-memory option or spark.driver.memory in Spark configuration."
      .config("spark.testing.memory", "471859200")
      // Sedona requires Kryo with its own registrator to serialize geometry types.
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.kryo.registrator", "org.apache.sedona.core.serde.SedonaKryoRegistrator")
      .config("spark.scheduler.listenerbus.eventqueue.threads", "4")
      .getOrCreate()
    session.sparkContext.setLogLevel("ERROR")
    try {
      val spark = SedonaContext.create(session)
      import spark.implicits._ // needed by the commented-out examples below
      println("[********]Sedona initialized successfully![********]")

      test2(spark, 3, 5)
//      // Create sample data
//      val A = Seq("A1", "A2", "A3", "A4", "A5").toDF("grid_id")
//      val B = Seq("A2", "A4").toDF("grid_id")
//
//      // Approach 1
//      val C = A.alias("a")
//        .join(B.alias("b"), $"a.grid_id" === $"b.grid_id", "left")
//        .withColumn("a", when($"b.grid_id".isNotNull, 1).otherwise(0))
//        .select($"a.grid_id", $"a")
//
//      // Show the result
//      C.show()
//
//    // Original DataFrame
//    val df = spark.createDataFrame(Seq(
//        (1, "Point A", "POINT (10 20)"),
//        (2, "Point B", "POINT (30 40)"),
//        (3, "Point C", "POINT (30 50)"),
//        (4, "Point D", "POINT (30 60)"),
//        (5, "Point E", "POINT (30 70)"),
//        (6, "Point F", "POINT (30 80)"),
//        (7, "Point G", "POINT (30 90)")
//      )).toDF("id", "name", "wkt")
//      .withColumn("geometry", expr("ST_GeomFromText(wkt)"))
//
//      df.printSchema()
//
//    // Convert to SpatialRDD (keeping id and name)
//    val spatialRDD = Adapter.toSpatialRdd(df, "geometry")
//
//    // Perform spatial operations (example: spatial filtering)
//    spatialRDD.analyze()
//    spatialRDD.spatialPartitioning(GridType.KDBTREE, 1)
//
//    // Convert back to a DataFrame
//    val rowRDD = spatialRDD.rawSpatialRDD.map { geom =>
//      val data = geom.getUserData.asInstanceOf[String].split("\t")
//      Row(data(0).toInt, data(1), data(2), geom) // (id, name, geometry)
//    }
//    val resultDF = spark.createDataFrame(rowRDD, df.schema)
//
//    resultDF.show()
//    // Output:
//    // +---+-------+--------------------+
//    // | id|   name|            geometry|
//    // +---+-------+--------------------+
//    // |  1|Point A|[POINT (10 20)]...|
//    // |  2|Point B|[POINT (30 40)]...|
//    // +---+-------+--------------------+
  }
    finally {
      // Always release the Spark session, even if the demo throws.
      session.stop()
    }
  }

  /**
   * Builds a numCols x numRows grid as a DataFrame of (x, y, grid_id) rows,
   * shows it, then collects the grid ids in grid_id order and writes them to
   * disk as a raw byte matrix (one byte per cell).
   *
   * @param spark      active (Sedona-enabled) Spark session
   * @param numCols    number of grid columns
   * @param numRows    number of grid rows
   * @param outputPath destination file for the one-byte-per-cell matrix;
   *                   defaults to the original hard-coded location
   */
  private def test2(spark: SparkSession, numCols: Int, numRows: Int,
                    outputPath: String = "D:\\temp\\jl\\test1234.data"): Unit = {
    // One tuple per cell: (x, y, grid_id) with grid_id = x + y * numCols.
    val gridPolygons = for {
      x <- 0 until numCols
      y <- 0 until numRows
    } yield {
      (x.toLong, y.toLong, (x + y * numCols).toLong)
    }
    // Create an RDD from the list, then a DataFrame from the RDD; the tuple
    // layout matches the StructType defined below.
    val gridPolygonsRDD = spark.sparkContext.parallelize(gridPolygons).map(t => Row(t._1, t._2, t._3))
    // StructType is Spark SQL's core class for describing a DataFrame's
    // column structure and data types.
    val gridsSchema = StructType(
      Array(
        StructField("x", LongType, nullable = true),
        StructField("y", LongType, nullable = true),
        StructField("grid_id", LongType, nullable = true)
      )
    )
    val df = spark.createDataFrame(gridPolygonsRDD, gridsSchema)
    df.show()

    // Save locally as a binary array: one byte per grid cell, ordered by grid_id.
    println("collect grid-access and save as byte matrix.")
    val gridArray = df
      .orderBy("grid_id")
      .collect()
      .map(row => row.getLong(2)) // extract the grid_id value
    // NOTE(review): toByte truncates — grid ids >= 128 wrap around. Safe only
    // while numCols * numRows <= 128; confirm intended for larger grids.
    val byteArray = gridArray.map(_.toByte)
    // Fix: close the stream even if write() throws (original leaked the
    // file descriptor on failure).
    val fos = new FileOutputStream(outputPath)
    try fos.write(byteArray)
    finally fos.close()
  }
}
