package com.shujia.merge

import com.shujia.spark.SparkTool
import org.apache.spark.sql.SaveMode


object MakeMergeLocation extends SparkTool {

  /** Builds the day-partitioned input/output path for one data source. */
  private def partitionedPath(basePath: String, dayId: String): String =
    basePath + Constants.PARTITION_NAME + dayId

  /**
    * Spark job entry point: merges four location data sources (DDR, DPI,
    * WCDR, OIDD) for a single day partition into one "merge location"
    * table and writes it out in parquet format.
    *
    * Output schema (all columns string-typed):
    *   mdn          - phone number
    *   start_time   - business start time
    *   county_id    - county code
    *   longi        - longitude
    *   lati         - latitude
    *   bsid         - base-station id
    *   grid_id      - grid number
    *   biz_type     - business type
    *   event_type   - event type
    *   data_source  - originating data source
    *
    * @param args args(0) is the day partition id; the job logs a warning
    *             and does nothing when no partition is supplied
    */
  override def run(args: Array[String]): Unit = {
    if (args.length == 0) {
      LOGGER.warn("请指定分区....")
    } else {
      val dayId = args(0)

      // One input path per upstream source for the requested day partition.
      val ddrInputPath = partitionedPath(Constants.DDR_INPUT_PATH, dayId)
      val dpiInputPath = partitionedPath(Constants.DPI_INPUT_PATH, dayId)
      val wcdrInputPath = partitionedPath(Constants.WCDR_INPUT_PATH, dayId)
      val oiddInputPath = partitionedPath(Constants.OIDD_INPUT_PATH, dayId)

      LOGGER.info(s"DDR输入路径：$ddrInputPath")
      LOGGER.info(s"dpi输入路径：$dpiInputPath")
      LOGGER.info(s"wcdr输入路径：$wcdrInputPath")
      LOGGER.info(s"oidd输入路径：$oiddInputPath")

      val ddrRDD = sc.textFile(ddrInputPath)
      val dpiRDD = sc.textFile(dpiInputPath)
      val wcdrRDD = sc.textFile(wcdrInputPath)
      val oiddRDD = sc.textFile(oiddInputPath)

      // Concatenate the four sources into the merged location data set.
      val mergeRDD = ddrRDD.union(dpiRDD).union(wcdrRDD).union(oiddRDD)

      val mergeLocationOutPutPath =
        partitionedPath(Constants.MERGELOCATION_OUTPUT_PATH, dayId)
      LOGGER.info(s"融合表输出路径：$mergeLocationOutPutPath")

      // Stable identifier so the SparkSession implicits (toDF) can be imported.
      val session = sql
      import session.implicits._

      val mergeDF = mergeRDD
        .coalesce(10) // reduce partition count so the job writes ~10 output files
        .map(line => {
          // limit = -1 keeps trailing empty fields, so a record whose last
          // column (data_source) is empty still yields 10 fields instead of
          // throwing ArrayIndexOutOfBoundsException.
          val fields = line.split(Constants.DATA_SPLIT, -1)
          (fields(0), fields(1), fields(2), fields(3), fields(4),
            fields(5), fields(6), fields(7), fields(8), fields(9))
        })
        .toDF("mdn", "start_time", "county_id", "longi", "lati",
          "bsid", "grid_id", "biz_type", "event_type", "data_source")

      mergeDF
        .write
        .mode(SaveMode.Overwrite) // idempotent: rerunning a day replaces its output
        .parquet(mergeLocationOutPutPath)
    }
  }

  /**
    * Initializes the Spark configuration with shuffle/memory tuning before
    * the job runs.
    */
  override def init(): Unit = {
    // Size of the in-memory buffer used when shuffle data is spilled to disk.
    conf.set("spark.shuffle.file.buffer", "64k")
    // Max data a reduce task fetches from map output in one request.
    conf.set("spark.reducer.maxSizeInFlight", "96m")
    // Retry count when a shuffle-read task fetches its data from the
    // shuffle-write task's node.
    conf.set("spark.shuffle.io.maxRetries", "10")
    // Wait time between those fetch retries.
    conf.set("spark.shuffle.io.retryWait", "60s")

    // Tuned together; the remaining 0.2 is left for task execution.
    // NOTE(review): these two fractions are legacy memory-manager settings
    // (ignored under the unified memory manager of Spark 1.6+) — confirm the
    // cluster's Spark version before relying on them.
    conf.set("spark.shuffle.memoryFraction", "0.4") // shuffle memory fraction
    conf.set("spark.storage.memoryFraction", "0.4") // RDD cache memory fraction

    //conf.setMaster("local[8]")//集群运行需要注释掉
  }
}
