package com.shujia.point

import com.shujia.grld.Grid
import com.shujia.spark.SparkTool
import com.shujia.util.DateUtil
import org.apache.spark.sql.SaveMode

object MakeStayPoint extends SparkTool {

  /**
    * Job entry point: build the daily stay-point table from the merged
    * location table.
    *
    * Input (merged table, parquet):
    *   mdn, start_time ("startTs,endTs"), county_id, longi, lati, bsid,
    *   grid_id, biz_type, event_type, data_source
    *
    * Output (stay-point table, parquet):
    *   mdn, longi, lati, grid_id, county_id, duration (minutes),
    *   grid_first_time, grid_last_time
    *
    * @param args args(0) is the mandatory day partition id (e.g. "20240101")
    */
  override def run(args: Array[String]): Unit = {

    // A day partition is mandatory; abort early with a clear log message.
    if (args.length == 0) {
      LOGGER.error("请指定参数")
      return
    }

    val dayId = args(0)

    // Input path of the merged location table for this day.
    val mergeInputPath = Constants.MERGELOCATION_INPUT_PATH + Constants.PARTITION_NAME + dayId
    LOGGER.info(s"融合表输入路径:$mergeInputPath")

    // Output path of the stay-point table for this day.
    val stayPointOutputPath = Constants.STAYPOINT_OUTPUT_PATH + Constants.PARTITION_NAME + dayId
    LOGGER.info(s"停留表输出路径：$stayPointOutputPath")

    // Read the merged table and keep only the columns needed here.
    val mergeDF = sql.read.parquet(mergeInputPath)
    val sDF = mergeDF.select("mdn", "start_time", "grid_id", "county_id")

    // Key each record by (mdn, grid_id, county_id).  A tuple key is used
    // instead of the previous "_"-joined string so the key never needs to be
    // re-split (and cannot break should a field ever contain '_').
    //
    // start_time is expected to be "startTs,endTs" — TODO confirm upstream
    // format.  The per-record value (start, start, end) seeds an associative
    // fold over (minStart, maxStart, endAtMaxStart): the earliest start time
    // and the end time of the record with the latest start time.
    val keyedRDD = sDF.rdd.map(row => {
      val parts = row.getAs[String]("start_time").split(",")
      val key = (
        row.getAs[String]("mdn"),
        row.getAs[String]("grid_id"),
        row.getAs[String]("county_id")
      )
      (key, (parts(0), parts(0), parts(1)))
    })

    // reduceByKey instead of groupByKey: the min/max fold runs map-side, so
    // only one small triple per key crosses the shuffle.  groupByKey would
    // ship — and hold in executor memory — every record of a hot key just to
    // sort it and keep two fields.  Timestamps are fixed-width strings, so
    // lexicographic comparison matches the original sortBy(_._1) ordering.
    val pointRDD = keyedRDD
      .reduceByKey { case ((min1, max1, end1), (min2, max2, end2)) =>
        val minStart = if (min1 <= min2) min1 else min2
        if (max1 >= max2) (minStart, max1, end1) else (minStart, max2, end2)
      }
      .map { case ((mdn, gridId, countyId), (gridFirstTime, _, gridLastTime)) =>
        // Stay duration in minutes between the first and last point in the grid.
        val duration = math.abs(DateUtil.betweenM(gridFirstTime, gridLastTime))
        // Longitude/latitude of the grid centre point.
        val center = Grid.getCenter(gridId.toLong)
        (mdn, center.x, center.y, gridId, countyId, duration, gridFirstTime, gridLastTime)
      }

    val s = sql
    import s.implicits._

    // Persist the stay-point table, overwriting any previous run for this day.
    pointRDD
      .toDF("mdn", "longi", "lati", "grid_id", "county_id", "duration", "grid_first_time", "grid_last_time")
      .write
      .mode(SaveMode.Overwrite)
      .parquet(stayPointOutputPath)
  }

  /**
    * Spark configuration tuned for the shuffle-heavy aggregation above.
    * Uncomment conf.setMaster("local") for local debugging.
    */
  override def init(): Unit = {

    // Reference submit command:
    // spark-submit --class org.apache.spark.examples.SparkPi \
    //   --master yarn-client --num-executors 8 --executor-memory 16G \
    //   --conf spark.default.parallelism=100 \
    //   --conf spark.storage.memoryFraction=0.4 \
    //   --conf spark.shuffle.memoryFraction=0.4 \
    //   ./lib/spark-examples-1.6.0-hadoop2.6.0.jar 100

    conf.set("spark.shuffle.file.buffer", "64k")     // shuffle write buffer size per task
    conf.set("spark.reducer.maxSizeInFlight", "96m") // max data a reducer fetches per request
    conf.set("spark.shuffle.io.maxRetries", "10")    // retries when fetching shuffle blocks
    conf.set("spark.shuffle.io.retryWait", "60s")    // wait between shuffle fetch retries

    // Tuned together; the remaining ~0.2 fraction is left for task execution.
    conf.set("spark.shuffle.memoryFraction", "0.4") // fraction of memory for shuffle
    conf.set("spark.storage.memoryFraction", "0.4") // fraction of memory for RDD caching

    //    conf.set("spark.default.parallelism", "100") // shuffle parallelism
    //.set("spark.shuffle.sort.bypassMergeThreshold","200") // bypass-merge trigger threshold

    //conf.setMaster("local")
  }

}
