package com.shujia.dim

import com.shujia.poly.Polygon
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object ScenicGridApp extends Logging {

  /**
   * Matches every geo grid whose center point falls inside a scenic-area
   * boundary, and writes the resulting (scenic_id, scenic_name, grid_id)
   * mapping to HDFS as a tab-separated file.
   *
   * Reads:  dim.dim_geotag_grid, dim.dim_scenic_boundary (Hive tables)
   * Writes: /daas/motl/dim/dim_scenic_grid (overwritten on each run)
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("ScenicGridApp")
      .config("spark.sql.shuffle.partitions", "1")
      .enableHiveSupport() // use the Hive metastore so the dim.* tables resolve
      .getOrCreate()

    try {
      val grid: DataFrame = spark.sql("select * from dim.dim_geotag_grid")
      val scenicBoundary: DataFrame = spark.sql("select * from dim.dim_scenic_boundary")

      // Pair every grid with every scenic boundary; the boundary table is
      // broadcast (assumed small) so the cross join avoids a shuffle.
      grid.crossJoin(scenicBoundary.hint("broadcast"))
        .select("grid_id", "center_longi", "center_lati", "scenic_id", "scenic_name", "boundary")
        .filter(row => {

          // Grid center coordinates and the scenic-area boundary polygon.
          val longiStr: String = row.getAs[String]("center_longi")
          val latiStr: String = row.getAs[String]("center_lati")
          val boundary: String = row.getAs[String]("boundary")

          // Guard against null columns: without this a single dirty row
          // fails the whole job with an NPE inside .toDouble.
          if (longiStr == null || latiStr == null || boundary == null) {
            false
          } else {
            // Keep the pair only when the grid's center point lies inside
            // the scenic boundary. NOTE(review): a Polygon is parsed per
            // row; if this is slow, consider mapPartitions with a
            // per-boundary cache.
            val polygon = new Polygon(boundary)
            polygon.contains(longiStr.toDouble, latiStr.toDouble)
          }

        })
        .select("scenic_id", "scenic_name", "grid_id")
        .distinct()
        .write
        .format("csv")
        .option("sep", "\t")
        .mode(SaveMode.Overwrite)
        .save("/daas/motl/dim/dim_scenic_grid")
    } finally {
      // Always release the SparkSession so cluster resources (YARN/K8s
      // executors) are freed even when the job throws.
      spark.stop()
    }

  }
}
