package com.ctyun.dim

import com.shujia.utils.SparkTool
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * Dimension job: maps each scenic spot to the set of geo grids it covers.
 *
 * Cross-joins the (small, broadcast) scenic-boundary table against the grid
 * table, keeps grid/boundary pairs where the grid falls inside the boundary,
 * aggregates the matching grid ids per scenic spot into a comma-separated
 * string, and writes the result as tab-separated CSV.
 */
object DimScenicGrid extends SparkTool {
  override def run(spark: SparkSession): Unit = {
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Scenic-spot boundary dimension table (small enough to broadcast).
    val boundaries: DataFrame = spark.table("dim.dim_scenic_boundary")

    // Geo-grid configuration dimension table.
    val grids: DataFrame = spark.table("dim.dim_geotag_grid")

    // Pair every grid with every boundary (boundaries broadcast to avoid a
    // shuffle), then keep only grids that lie inside the scenic boundary.
    // NOTE(review): isInBoundaryWithGrid is assumed to come from SparkTool /
    // an outer import — confirm it is in scope here.
    val gridsInScenic = grids
      .crossJoin(boundaries.hint("broadcast"))
      .where(isInBoundaryWithGrid($"grid_id", $"boundary"))

    // Collapse all matching grids of one scenic spot into a single row,
    // serialising the grid-id set as a comma-separated string.
    val scenicGrids = gridsInScenic
      .groupBy($"scenic_id", $"scenic_name")
      .agg(collect_set($"grid_id").alias("grid_id_arr"))
      .select(
        $"scenic_id",
        $"scenic_name",
        concat_ws(",", $"grid_id_arr").alias("grids")
      )

    // Persist as tab-separated CSV, replacing any previous output.
    scenicGrids.write
      .format("csv")
      .option("sep", "\t")
      .mode(SaveMode.Overwrite)
      .save(s"/daas/motl/dim/dim_scenic_grid")
  }
}
