package com.shujia.dim

import java.awt.geom.Point2D

import com.shujia.common.grid.Grid
import com.shujia.common.poly.Polygon
import com.shujia.common.util.SparkTool
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object DIMScenicGrid extends SparkTool {

  /**
    * Builds the scenic-spot → grid dimension table.
    *
    * Cross-joins every scenic boundary with every geotag grid, keeps the
    * grids whose centre point falls inside the boundary polygon, then
    * aggregates the matching grid ids per scenic spot into one
    * comma-separated column and writes the result as a tab-separated CSV
    * to /daas/motl/dim/dim_scenic_grid (overwriting any previous run).
    *
    * NOTE(review): the cross join is O(|scenics| × |grids|); acceptable for
    * small config tables, but worth revisiting if either table grows.
    */
  override def run(spark: SparkSession): Unit = {
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // 1. Scenic-spot boundary configuration table (scenic_id, scenic_name, boundary)
    val scenicBoundary: DataFrame = spark.table("dim.dim_scenic_boundary")

    // 2. Grid configuration table (grid_id, ...)
    val geotagGrid: DataFrame = spark.table("dim.dim_geotag_grid")

    // UDF: true when the centre point of the grid lies inside the scenic
    // boundary polygon. Null-safe: rows with a missing grid id or boundary
    // are simply filtered out instead of throwing an NPE.
    // (Removed the per-row System.currentTimeMillis/println debug timing —
    // it ran once per cross-join row, flooding executor logs.)
    val containsBoundary: UserDefinedFunction = udf((gridId: String, boundary: String) => {
      if (gridId == null || boundary == null) {
        false
      } else {
        val polygon = new Polygon(boundary)
        val center: Point2D.Double = Grid.getCenter(gridId.toLong)
        polygon.contains(center)
      }
    })

    /**
      * Collect every grid belonging to each scenic spot.
      *
      * collect_list: inverse of explode (rows → array)
      * array_join:   inverse of split  (array → delimited string)
      */
    scenicBoundary
      .crossJoin(geotagGrid)
      // keep only the grids located inside the scenic boundary
      .filter(containsBoundary($"grid_id", $"boundary"))
      // merge all grids of the same scenic spot into a single row
      .groupBy($"scenic_id", $"scenic_name")
      .agg(collect_list($"grid_id") as "grids")
      .select($"scenic_id", $"scenic_name", array_join($"grids", ",") as "grids")
      .write
      .format("csv")
      .option("sep", "\t")
      .mode(SaveMode.Overwrite)
      .save("/daas/motl/dim/dim_scenic_grid")
  }
}
