package com.shujia.dwi

import com.shujia.util.MD5
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}


/**
  *
  * Location fusion (merge) table: unifies per-source ODS location
  * records into one masked DWI table, partitioned by day.
  *
  */

/**
  * DWI location-fusion job.
  *
  * For one day partition it reads the raw location records from the four
  * ODS source tables (ddr, dpi, wcdr, oidd), masks the phone number (mdn)
  * with MD5, writes the merged result as tab-separated CSV under the DWI
  * table's HDFS path, and registers the Hive partition.
  *
  * Usage:
  *   spark-submit --master yarn-client --class com.shujia.dwi.MerGeLocationApp \
  *     --jars common-1.0.jar dwi-1.0.jar 20180503
  */
object MerGeLocationApp extends Logging {

  // Expected day_id format (yyyyMMdd). Guards the raw interpolation of
  // day_id into SQL text and the HDFS output path below.
  private val DayIdPattern = "\\d{8}"

  def main(args: Array[String]): Unit = {

    if (args.length == 0) {
      log.error("时间参数为空") // missing day_id argument
      return
    }

    // Day partition, e.g. 20180503.
    val day_id = args(0)

    // Reject anything that is not an 8-digit date: day_id is spliced
    // directly into SQL statements and a filesystem path.
    if (!day_id.matches(DayIdPattern)) {
      log.error(s"时间参数格式非法（期望 yyyyMMdd）：$day_id")
      return
    }

    log.info(s"当前时间分区为：$day_id")

    // Build the Spark session; Hive support is required so the ods.* and
    // dwi.* tables resolve through the metastore.
    val spark: SparkSession = SparkSession.builder()
      .appName("MerGeLocationApp")
      .config("spark.sql.shuffle.partitions", "10")
      .enableHiveSupport()
      .getOrCreate()

    try {
      import spark.implicits._
      import org.apache.spark.sql.functions._

      // Masking UDF: MD5-hash the raw phone number before it leaves ODS.
      spark.udf.register("ods_md5", (str: String) => MD5.md5(str))

      // In a real deployment this stage is considerably more complex:
      //   1. data masking
      //   2. extracting location fields from each source
      //   3. location calibration
      //   4. dirty-data removal

      // Union of the day's records from all four ODS location sources.
      val df: DataFrame = spark.sql(
        s"""
           |select * from ods.ods_ddr where day_id=$day_id
           |union all
           |select * from ods.ods_dpi where day_id=$day_id
           |union all
           |select * from ods.ods_wcdr where day_id=$day_id
           |union all
           |select * from ods.ods_oidd where day_id=$day_id
           |
        """.stripMargin)

      /**
        * Target DWI schema:
        *   mdn string comment '手机号码'
        *   ,start_time string comment '业务时间'
        *   ,county_id string comment '区县编码'
        *   ,longi string comment '经度'
        *   ,lati string comment '纬度'
        *   ,bsid string comment '基站标识'
        *   ,grid_id string comment '网格号'
        *   ,biz_type string comment '业务类型'
        *   ,event_type string comment '事件类型'
        *   ,data_source string comment '数据源'
        */
      val mskDF: DataFrame = df.select(
        // Fixed alias: was "ods_md5", but the DWI schema's first column is
        // "mdn" (output is positional CSV, so the rename is write-safe).
        expr("ods_md5(mdn)") as "mdn",
        $"start_time",
        $"county_id",
        $"longi",
        $"lati",
        $"bsid",
        $"grid_id",
        $"biz_type",
        $"event_type",
        $"data_source"
      )

      // Write the masked data into the partition directory of the DWI table.
      mskDF.write
        .format("csv")
        .option("sep", "\t")
        .mode(SaveMode.Overwrite)
        .save(s"/daas/motl/dwi/dwi_res_regn_mergelocation_msk_d/day_id=$day_id")

      // Register the new partition with the Hive metastore.
      spark.sql(s"alter table dwi.dwi_res_regn_mergelocation_msk_d  add if not exists partition(day_id='$day_id')")
    } finally {
      // Always release the session (and its YARN resources), even on failure.
      spark.stop()
    }
  }

}
