package com.shujia.dim

import com.shujia.poly.Polygon
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object ScenicGridApp {

  /**
    * Batch job: derive the scenic-area grid dimension table.
    *
    * Matches every grid cell against every scenic-area boundary and keeps
    * the (scenic_id, scenic_name, grid_id) pairs whose grid center point
    * falls inside the scenic area's polygon boundary. The result is
    * written as a tab-separated CSV to /daas/motl/dim/dim_scenic_grid.
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("ScenicBoundaryOdsToDIm")
      .config("spark.sql.shuffle.partitions", "4")
      .enableHiveSupport() // resolve dim.* tables through the Hive metastore
      .getOrCreate()

    // Ensure the session is always released, even if the job fails.
    try {
      // Grid configuration table: one row per grid cell with its center point.
      val grid: DataFrame = spark.sql("select * from dim.dim_geotag_grid")
      // Scenic-area boundary table: one row per scenic area with its polygon boundary.
      val scenicBoundary: DataFrame = spark.sql("select * from dim.dim_scenic_boundary")

      /**
        * Match every grid cell against every scenic area.
        *
        * The boundary table is small, so it is broadcast to avoid a shuffle;
        * the cross join then pairs each grid cell with each scenic area and
        * a point-in-polygon test filters the pairs down.
        *
        * NOTE(review): a new Polygon is parsed from the boundary string for
        * every (grid, scenic) pair, which is slow — the original author
        * flagged this too. Caching parsed polygons per boundary (e.g. via
        * mapPartitions) would avoid the repeated parsing.
        */
      grid.crossJoin(scenicBoundary.hint("broadcast"))
        .select("grid_id", "center_longi", "center_lati", "scenic_id", "scenic_name", "boundary")
        .filter(row => {
          // Longitude/latitude of the grid cell's center point
          // (stored as strings in the source table — TODO confirm schema).
          val center_longi: Double = row.getAs[String]("center_longi").toDouble
          val center_lati: Double = row.getAs[String]("center_lati").toDouble

          // Scenic-area boundary as an encoded polygon string.
          val boundary: String = row.getAs[String]("boundary")

          // Parse the boundary and test whether the center point lies inside it.
          val polygon = new Polygon(boundary)
          polygon.contains(center_longi, center_lati)
        })
        .select("scenic_id", "scenic_name", "grid_id")
        .distinct() // a grid cell may match a scenic area through duplicate source rows
        .write
        .format("csv")
        .option("sep", "\t")
        .mode(SaveMode.Overwrite)
        .save("/daas/motl/dim/dim_scenic_grid")
    } finally {
      // Release the Spark session and its cluster resources.
      spark.stop()
    }
  }
}
