package com.shujia.dim

import com.shujia.utils.SparkTool
import com.shujia.utils.poly.Polygon
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object DimScenicGrid extends SparkTool {

  /**
    * Builds the scenic-area -> grid dimension table.
    *
    * Cross-joins every scenic-area boundary with every geo grid, keeps the
    * grids whose center point falls inside the scenic boundary, and writes
    * one row per scenic area with a comma-separated list of its grid ids
    * to `/daas/motl/dim/dim_scenic_grid` (tab-separated text).
    */
  override def run(spark: SparkSession): Unit = {
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // UDF: true when the point (lon, lat) lies inside the boundary polygon.
    // Null-safe: null or malformed coordinates/boundaries are treated as
    // "not contained" (false) instead of throwing NumberFormatException/NPE,
    // which would otherwise fail the whole job on a single dirty row.
    val polygonContains: UserDefinedFunction = udf((lon: String, lat: String, boundary: String) => {
      if (lon == null || lat == null || boundary == null) false
      else
        scala.util
          .Try(new Polygon(boundary).contains(lon.toDouble, lat.toDouble))
          .getOrElse(false)
    })

    // 1. Scenic-area configuration: one row per scenic area, with its boundary.
    val scenic: DataFrame = spark.table("dim.dim_scenic_boundary")

    // 2. Geo-grid configuration: one row per grid, with its center point.
    val geotag: DataFrame = spark.table("dim.dim_geotag_grid")

    // For every (scenic area, grid) pair, keep the grids whose center point
    // lies inside the scenic boundary, then collapse each scenic area's grids
    // into a single comma-separated column.
    // NOTE(review): this is a full cross join (|scenic| x |grid| rows) —
    // acceptable for small dimension tables; revisit if either table grows.
    val resultDF: DataFrame = scenic
      .crossJoin(geotag)
      // Keep only grids whose center point is inside the scenic boundary.
      .where(polygonContains($"center_longi", $"center_lati", $"boundary"))
      .select($"scenic_id", $"scenic_name", $"county_id", $"grid_id")
      .groupBy($"scenic_id", $"scenic_name", $"county_id")
      // Merge all grids of the same scenic area into one row (deduplicated).
      .agg(collect_set($"grid_id") as "grids")
      .select($"scenic_id", $"scenic_name", $"county_id", concat_ws(",", $"grids") as "grids")

    // Persist as tab-separated text for downstream consumers.
    resultDF.write
      .format("csv")
      .option("sep", "\t")
      .mode(SaveMode.Overwrite)
      .save("/daas/motl/dim/dim_scenic_grid")

  }
}
