package com.example.RouteMap

import org.apache.spark.HashPartitioner
import org.apache.spark.graphx.PartitionStrategy.{CanonicalRandomVertexCut, EdgePartition1D, EdgePartition2D, RandomVertexCut}
import org.apache.spark.graphx.{Edge, EdgeDirection, Graph, VertexId}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import java.util.Date

object QTreeFind {
  /**
   * Driver entry point: builds a Spark session, loads a vertex/edge JSON pair
   * into a GraphX graph, runs a Pregel single-source shortest path query
   * between two hard-coded vertex ids, prints the result and timing, then
   * (in local-debug mode) blocks on stdin so the Spark UI stays inspectable.
   */
  def main(args: Array[String]): Unit = {
    println("Hello world!")

    // Toggle between local debugging (local[*] master, local file paths)
    // and cluster mode (Alluxio paths). Flip to false before deploying.
    val isLocalDebug = true
    val builder = SparkSession.builder().appName("RouteMap")
    //.config("spark.ui.enabled", "false")  // Attempted fix (did not help): when the log keeps printing "INFO AsyncEventQueue: Process of event SparkListenerJobStart ... by listener AppStatusListener took 8.12s"-style messages after the job finishes, Spark's status listener (AppStatusListener) is taking too long per event and the event queue is backing up.
    //.config("spark.appStatusListener.enabled", "false")
    //.config("spark.ui.retainedJobs", "100")
    //.config("spark.ui.retainedStages", "200")
    //.config("spark.ui.retainedTasks", "1000")
    if (isLocalDebug) {
      builder.master("local[*]")//.config("spark.driver.memory", "2g")  // try to fix java.lang.OutOfMemoryError: GC overhead limit exceeded
    }
    val session = builder.getOrCreate()
    session.sparkContext.setLogLevel("WARN")

    // Hard-coded core count used only for the debug prints below.
    val numCores = 24 //Runtime.getRuntime.availableProcessors() // apparently not usable this way — it did not match the actual core count

    // Vertex file: one JSON object per line with fields id, center_x, center_y.
    val vDFUrl = if (isLocalDebug) "D:\\temp\\jl\\testv.json" else "alluxio://192.168.20.208:19998/tmp/testv.json"
    val vDF = session.read.json(vDFUrl) // .repartition(numCores)
    val vertices: RDD[(VertexId, (Double, Double))] = vDF.rdd.map { row =>
      (row.getAs[Long]("id"), (row.getAs[Double]("center_x"), row.getAs[Double]("center_y")))
    } //.repartition(numCores)

    // Edge file: one JSON object per line with fields v1 (src), v2 (dst), w (weight).
    val eDFUrl = if (isLocalDebug) "D:\\temp\\jl\\teste.json" else "alluxio://192.168.20.208:19998/tmp/teste.json"
    val eDF = session.read.json(eDFUrl) // .repartition(numCores)
    val edges: RDD[Edge[Double]] = eDF.rdd.map {
      row => Edge(row.getAs[Long]("v1"), row.getAs[Long]("v2"), row.getAs[Double]("w"))
    } //.repartition(numCores)

    // Note: GraphX only supports directed graphs natively. If the edges in the
    // JSON file are already directed, they can be loaded as-is.
    val graph = Graph(vertices, edges).partitionBy(CanonicalRandomVertexCut)  // .cache() — adding cache() here was actually slightly slower; reason unknown.
    println("[****]numCores:" + numCores)
    println("[****]graph.edges.partitions.size:"+ graph.edges.partitions.length) // may need a cache() first (or a count-like action) for this to be meaningful
    println("[****]vDF patitions:" + vDF.rdd.getNumPartitions)
    println("[****]eDF patitions:" + eDF.rdd.getNumPartitions)
    println("[****]vertices partitions:" + vertices.getNumPartitions)
    println("[****]edges: partitions:" + edges.getNumPartitions)
    println("[****]graph  stats:" + graph.degrees.map(_._2).stats())
    graph.edges.mapPartitionsWithIndex((idx, iter) => Iterator((idx, iter.size))).collect().foreach(r=>println("my_partition " + r))
    // Otherwise (undirected input), mirror each edge by swapping src and dst —
    // use the two lines below instead of the single Graph(...) line above.
    //    val doubleDirEdges = edges.flatMap{ edge => Seq(edge, Edge(edge.dstId, edge.srcId, edge.attr)) }
    //    val graph = Graph(vertices, doubleDirEdges)

    // Test: count vertices / connected components & print the result.
    // println(graph.vertices.count())

    // Optimizations suggested by deepseek (to be investigated):
    // (1) Custom partitioner that places adjacent vertices in the same partition:
    // class VertexRangePartitioner(partitions: Int) extends Partitioner
    // Build the vertex RDD and apply the custom partitioner:
    // val partitionedVertices = vertices.partitionBy(new VertexRangePartitioner(2))
    // Build the edge RDD (edges follow the vertex partitioning automatically):
    // val edges: RDD[Edge[String]] = ... // your edge data
    // val graph = Graph(partitionedVertices, edges)
    //
    // (2) Custom partition strategy: assign by vertex-id range so adjacent vertices
    // are more likely to land in the same partition.
    // Spark 2: "object CustomPartitionStrategy extends PartitionStrategy" — no longer allowed in Spark 3.
    // In Spark 3.x, PartitionStrategy is a sealed trait with 5 built-in implementations
    // (RandomVertexCut, EdgePartition2D, ...) and user-defined implementations are forbidden.
    // val partitionedGraph = graph.partitionBy(EdgePartition2D)
    // -------------------------------------
    // Single-source shortest path via graphx.pregel().
    // Test src is (110.0, 338.0) , dst is (484.0, 372.0)
    // id: 110*512+338=56658, 484*512+372=248180
    val startTime = new Date().getTime
    val path = getShortestPath(graph, 248180, 56658)
    path match {
      case Some(p) =>
        println(s"[****] Path found(${p.length} steps):")
        println(p.mkString(","))
      case None =>
        println("[****] No valid path exists.")
    }
    println("[****] fininshed!!!")
    val endTime = new Date().getTime
    println(s"[****] ${(endTime-startTime)/1000.0}")
    // Dev/debug only! Block at the end of the program so the Spark UI
    // (http://localhost:4040) remains available for inspection.
    if (isLocalDebug) {
      println("[****]input any key and press enter to quit.")
      System.in.read()  // wait for user input before shutting down
    }
    session.stop()
  }

  /**
   * Single-source shortest path computed with GraphX Pregel.
   *
   * @param graph         input graph; vertex attributes (center_x, center_y) are
   *                      ignored here, the edge attribute is the traversal weight
   * @param sourceId      vertex to start from
   * @param maxIterations upper bound on Pregel supersteps (previously hard-coded
   *                      to 50; kept as the default for backward compatibility)
   * @return Graph whose vertex attribute is (total distance from sourceId,
   *         list of vertex ids along the shortest path). Vertices not reached
   *         within maxIterations keep (Double.PositiveInfinity, List()).
   */
  private def singleSourceShortestPath(graph: Graph[(Double, Double), Double],
                                       sourceId: VertexId,
                                       maxIterations: Int = 50): Graph[(Double, List[VertexId]), Double] = {
    // Re-type the vertices (Double, Double) => (Double, List[VertexId]):
    // the source gets distance 0 and a path containing only itself; every other
    // vertex starts unreachable with an empty path.
    val initialGraph: Graph[(Double, List[VertexId]), Double] = graph.mapVertices {
      case (id, _) =>
        if (id == sourceId)
          (0.0, List(id))
        else
          (Double.PositiveInfinity, List())
    }
    // Message type A = (Double, List[VertexId]) = (candidate distance, path).
    // Initial message: infinite distance with an empty path, which every vprog
    // invocation discards in favor of the vertex's current state.
    initialGraph.pregel(
      (Double.PositiveInfinity, List[VertexId]()),
      maxIterations,
      activeDirection = EdgeDirection.Out
    )(
      // vprog: keep whichever of (current state, incoming message) carries the
      // smaller distance.
      (_, dist, newDist) => if (dist._1 < newDist._1) dist else newDist,
      // sendMsg: edge relaxation — if routing through src improves dst's
      // distance, send dst the improved distance and the path extended by dstId.
      // NOTE: List `:+` append is O(n) per hop; acceptable for short paths,
      // consider prepending and reversing at read time if paths grow long.
      triplet => {
        if (triplet.srcAttr._1 + triplet.attr < triplet.dstAttr._1) {
          Iterator((triplet.dstId, (triplet.srcAttr._1 + triplet.attr, triplet.srcAttr._2 :+ triplet.dstId)))
        } else {
          Iterator.empty
        }
      },
      // mergeMsg: of two candidate messages, keep the one with the smaller distance.
      (a, b) => if (a._1 < b._1) a else b
    )
  }

  /**
   * Finds the shortest path between two vertices by running the Pregel SSSP
   * from startId and reading the accumulated path at endId.
   *
   * @param graph   input graph (vertex attrs unused, edge attr = weight)
   * @param startId source vertex id
   * @param endId   destination vertex id
   * @return Some(path) — the list of vertex ids from startId to endId inclusive —
   *         when endId exists and is reachable; None when endId is missing from
   *         the graph or its distance is still +Infinity (unreachable).
   */
  private def getShortestPath(graph: Graph[(Double, Double), Double],
                              startId: Long,
                              endId: Long): Option[List[VertexId]] = {
    val sssp = singleSourceShortestPath(graph, startId)
    // Debug output; count() is an action and materializes the result graph.
    println(s"total count is ${sssp.vertices.count()}")
    // Fix: the original called .first() on the filtered RDD, which throws
    // NoSuchElementException when endId is not present in the graph.
    // collect().headOption turns the absent case into None instead.
    // (Renamed the matched list from `path` to `route` to stop it shadowing
    // the enclosing result.)
    val target: Option[(Double, List[VertexId])] =
      sssp.vertices.filter(_._1 == endId).map(_._2).collect().headOption
    target.flatMap {
      // A finite distance means endId was reached; the accumulated vertex list
      // is the full path (both endpoints included by construction).
      case (distance, route) if java.lang.Double.isFinite(distance) => Some(route)
      case _ => None // distance == +Infinity: endId unreachable from startId
    }
  }
}