package cn.doitedu.datayi.utils

import java.util.Properties

import ch.hsr.geohash.GeoHash
import org.apache.spark.sql.SparkSession

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-08-05
 * @desc Integrating Spark with Hive requires three things:
 *          call sparkSession.enableHiveSupport()
 *          add the spark-hive integration dependency
 *          add the Hive configuration parameters (files)
 */
object AreaDictUtil {

  /**
   * One-shot batch job: reads the administrative-area hierarchy from MySQL,
   * flattens it (province / city / region) keyed by a 6-character geohash of
   * each level-4 area's Baidu (bd09) coordinates, and appends the result into
   * the Hive table `dim23.area_dict`.
   */
  def main(args: Array[String]): Unit = {

    // Local session with Hive support so the INSERT below can target dim23.area_dict.
    val spark = SparkSession.builder()
      .appName("地理位置字典加工")
      // The dictionary is tiny; one shuffle partition avoids many small output files.
      .config("spark.sql.shuffle.partitions", "1")
      .enableHiveSupport()   // enable Hive integration support
      .master("local")
      .getOrCreate()

    // JDBC connection properties for the MySQL source.
    // SECURITY(review): credentials are hard-coded in source; move them to a
    // config file or environment variables before this leaves a dev machine.
    val props = new Properties()
    props.put("user", "root")
    props.put("password", "ABC123abc.123")

    // Full area table; rows at level 1..4 linked by parentid form the hierarchy.
    val df = spark.read.jdbc(
      "jdbc:mysql://hdp01:3306/realtimedw?useUnicode=true&characterEncoding=utf-8",
      "t_md_areas",
      props)

    // createOrReplaceTempView is idempotent: re-running in a reused session
    // no longer fails with "temp view already exists".
    df.createOrReplaceTempView("area")

    // UDF: encode a (lat, lng) pair as a 6-character geohash string.
    val gps2geo = (lat: Double, lng: Double) => {
      GeoHash.geoHashStringWithCharacterPrecision(lat, lng, 6)
    }
    spark.udf.register("geo", gps2geo)

    // Self-join level-4 -> level-3 -> level-2 -> level-1 to flatten the
    // hierarchy into (geohash, province, city, region) rows.
    // The WHERE clause is a correctness fix: Spark feeds SQL NULL into a
    // primitive Double UDF as 0.0, so rows with missing coordinates would
    // otherwise silently yield the geohash of (0, 0) and pollute the dict.
    spark.sql(
      """
        |
        |insert into table dim23.area_dict
        |select
        |  geo(l4.bd09_lat,l4.bd09_lng) as geohash,
        |  l1.areaname as province,
        |  l2.areaname as city,
        |  l3.areaname as region
        |from
        |  area l4 join area l3 on l4.level=4 and l4.parentid=l3.id
        |          join area l2 on l3.parentid=l2.id
        |          join area l1 on l2.parentid=l1.id
        |where l4.bd09_lat is not null and l4.bd09_lng is not null
        |group by geo(l4.bd09_lat,l4.bd09_lng),l1.areaname,l2.areaname,l3.areaname
        |
        |""".stripMargin)

    spark.close()
  }
}
