package com.shujia.dim

import com.shujia.common.SparkTool
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object DimScenicGrid extends SparkTool {

  /**
   * Builds the scenic-spot-to-grid dimension table: for every scenic spot,
   * collects the ids of all geo grids whose center point lies inside the
   * spot's boundary, and persists the result as a tab-separated file.
   */
  override def run(spark: SparkSession): Unit = {
    import org.apache.spark.sql.functions._
    import spark.implicits._

    // Scenic-area boundary configuration (one boundary polygon per spot).
    val boundaries: DataFrame = spark.table("dim.dim_scenic_boundary")

    // Geo-grid configuration (grid id plus its center coordinates).
    val grids: DataFrame = spark.table("dim.dim_geotag_grid")

    // There is no join key between the two tables, so take the Cartesian
    // product; the boundary table is small, so broadcast it to executors.
    val gridsInScenic = grids
      .crossJoin(boundaries.hint("broadcast"))
      // Keep only grids whose center falls inside the scenic boundary.
      // NOTE(review): isInBoundary is not defined in this file — presumably
      // a UDF provided by SparkTool; confirm its point-in-polygon semantics.
      .where(isInBoundary($"center_longi", $"center_lati", $"boundary"))

    // Group by scenic spot, gather its distinct grid ids into a set, then
    // flatten the set into a single comma-separated string column "grids".
    val scenicGrids = gridsInScenic
      .groupBy($"scenic_id", $"scenic_name")
      .agg(collect_set($"grid_id") as "grid_set")
      .select($"scenic_id", $"scenic_name", concat_ws(",", $"grid_set") as "grids")

    // Persist as tab-separated CSV, replacing any previous run's output.
    scenicGrids
      .write
      .mode(SaveMode.Overwrite)
      .format("csv")
      .option("sep", "\t")
      .save("/daas/motl/dim/dim_scenic_grid")
  }
}
