package com.shujia.tour

import com.shujia.spark.SparkTool
import com.shujia.tour.DalTourProvinceIndex.conf
import com.shujia.util.{Geography, SSXRelation}
import org.apache.spark.sql.SaveMode

/**
  * 省游客计算
  *
  * 游客定义
  * 出行距离大于10km
  * 常住地在用户画像表中
  *
  * 在省内停留时间大于3个小时
  *
  */
object DalTourProvince extends SparkTool {

  /**
    * Spark business logic entry point.
    *
    * Computes per-province tourist records for one day partition:
    * a (mdn, province) pair qualifies as a tourist visit when the
    * maximum distance from the user's home grid exceeds 10 km AND
    * the total stay time in that province exceeds 3 hours (180 min).
    *
    * @param args args(0) is the day partition id (e.g. "20180503"); required.
    */
  override def run(args: Array[String]): Unit = {

    // The day partition id is mandatory; bail out early if missing.
    if (args.length == 0) {
      LOGGER.error("请指定参数")
      return
    }

    val dayId = args(0)

    // Input: stay-point table for the requested day, and the (fixed-month)
    // user profile table that supplies each user's home grid/county.
    val pointInputPath = Constants.STAYPOINT_INPUT_PATH + Constants.PARTITION_NAME + dayId
    val userTagInputPath = Constants.USERTAG_INPUT_PATH + "/month_id=201805"


    LOGGER.info(s"停留表输入路径：$pointInputPath")
    LOGGER.info(s"用户画像表输入路径：$userTagInputPath")


    val provinceOutputPath = Constants.PROVINCE_OUTPUT_PATH + Constants.PARTITION_NAME + dayId
    LOGGER.info(s"省游客表输出路径：$provinceOutputPath")

    /**
      * Stay-point table (parquet), observed schema:
      * +--------------------+------------------+------------------+---------------+---------+--------+---------------+--------------+
      * |                 mdn|             longi|              lati|        grid_id|county_id|duration|grid_first_time|grid_last_time|
      * +--------------------+------------------+------------------+---------------+---------+--------+---------------+--------------+
      * |48E9E1D650EA71977...|          117.2425|           31.9775|117240031975040|  8340121|      -8| 20180503150218|20180503145418|
      * NOTE(review): sample shows a negative duration (-8) — upstream data
      * quality issue; durations are summed as-is here.
      */
    val pointDF = sql.read.parquet(pointInputPath)
    pointDF.printSchema()

    /**
      * User profile table (parquet), observed schema:
      * +--------------------+--------------------+------+---+--------------------+-----------+-----------+-----------+-----+------+---------------+--------------+
      * |                 mdn|                name|gender|age|           id_number|number_attr|trmnl_brand|trmnl_price|packg|conpot|   resi_grid_id|resi_county_id|
      * +--------------------+--------------------+------+---+--------------------+-----------+-----------+-----------+-----+------+---------------+--------------+
      * |1D2916F9ACFBFA279...|AA3DB6AB27731E170...|     2| 20|AA3DB6AB27731E170...|      86322|         魅族|       4300|   29|     9|117255031870040|       8340104|
      */
    val userTagDF = sql.read.parquet(userTagInputPath)
    userTagDF.printSchema()

    // Inner-join stay points with the profile table on mdn to attach each
    // user's home grid, then drop down to the RDD API: the no-argument
    // groupByKey() used below exists only on pair RDDs, and no Encoder is in
    // scope at this point for Dataset.map (implicits are imported further down).
    val joinRDD = pointDF.join(userTagDF, "mdn").rdd


    // Key every stay record by (mdn, province, home county) so all of one
    // user's activity inside one province lands in the same group.
    val kvRDD = joinRDD.map(row => {
      // Phone number (MD5-hashed upstream).
      val mdn = row.getAs[String]("mdn")
      // County of the stay point.
      val county_id = row.getAs[String]("county_id")
      // Grid of the stay point.
      val grid_id = row.getAs[String]("grid_id")
      // Stay duration (minutes, per the 3-hour=180 threshold below).
      val duration = row.getAs[Integer]("duration")
      // Home grid.
      val resi_grid_id = row.getAs[String]("resi_grid_id")
      // Home county.
      val resi_county_id = row.getAs[String]("resi_county_id")

      // Map the stay county to its province id.
      // NOTE(review): if COUNTY_PROVINCE is a scala.collection.Map, .get
      // returns an Option and the key/pid will serialize as "Some(...)";
      // confirm the map type and use .getOrElse / .get(county_id).orNull if so.
      val provinceId = SSXRelation.COUNTY_PROVINCE.get(county_id)

      // Composite grouping key.
      val key = mdn + "\t" + provinceId + "\t" + resi_county_id

      (key, s"$grid_id\t$duration\t$resi_grid_id")
    })

    /**
      * Aggregate per (mdn, province): farthest distance from home and total
      * stay time, then keep only records that satisfy the tourist definition:
      *   - travel distance > 10 km
      *   - total stay in the province > 3 hours
      */
    val filterRDD = kvRDD
      // Gather one user's records within one province into a single group.
      .groupByKey()
      .map {
        case (mdnAndPId: String, itr: Iterable[String]) => {

          val keyParts = mdnAndPId.split("\t")
          val mdn = keyParts(0)
          val pid = keyParts(1)
          val resi_county_id = keyParts(2)

          // Parse each record exactly once into
          // (distance from home grid, stay duration).
          val records = itr.toList.map(line => {
            val fields = line.split("\t")
            // fields(0)=grid_id, fields(1)=duration, fields(2)=resi_grid_id
            val distance = Geography.calculateLength(fields(0).toLong, fields(2).toLong)
            (distance, fields(1).toInt)
          })

          // Farthest point reached (groups are never empty after groupByKey,
          // so .max is safe here).
          val maxDistance = records.map(_._1).max

          // Total stay time in the province.
          val sumDuration = records.map(_._2).sum

          Province(mdn, resi_county_id, pid, sumDuration.toDouble, maxDistance)
        }
      }.filter(p => {
      // Total stay time (minutes).
      val sumDuration = p.d_stay_time
      // Farthest travel distance (meters).
      val maxDistance = p.d_max_distance

      // Tourist definition: > 3 hours in the province AND > 10 km from home.
      sumDuration > 180 && maxDistance > 10000
    })

    val s = sql
    import s.implicits._

    // Persist the result to HDFS as parquet, overwriting the day partition.
    filterRDD
      .toDF()
      .write
      .mode(SaveMode.Overwrite)
      .parquet(provinceOutputPath)

    /**
      * Output table columns:
      * mdn string comment '手机号大写MD5加密'
      * ,source_county_id string comment '游客来源区县'
      * ,d_province_id string comment '旅游目的地省代码'
      * ,d_stay_time double comment '游客在该省停留的时间长度（小时）'
      * ,d_max_distance double comment '游客本次出游距离'
      */
  }

  /**
    * Initialize Spark configuration (shuffle/memory tuning).
    * For local debugging: conf.setMaster("local")
    *
    * NOTE(review): spark.shuffle.memoryFraction / spark.storage.memoryFraction
    * are legacy (pre-unified-memory, Spark < 1.6) settings — confirm they still
    * take effect on the deployed Spark version.
    */
  override def init(): Unit = {
    conf.set("spark.shuffle.file.buffer", "64k") // shuffle write buffer size before spilling to disk
    conf.set("spark.reducer.maxSizeInFlight", "96m") // max data a reducer fetches from map side per request
    conf.set("spark.shuffle.io.maxRetries", "10") // retries when shuffle read fails to fetch from the write node
    conf.set("spark.shuffle.io.retryWait", "60s") // wait between shuffle fetch retries

    // Tuned together; the remaining 0.2 is left for task execution memory.
    conf.set("spark.shuffle.memoryFraction", "0.4") // fraction of heap for shuffle
    conf.set("spark.storage.memoryFraction", "0.4") // fraction of heap for RDD caching
    conf.set("spark.sql.shuffle.partitions", "10")
  }

  /**
    * One province-tourist record.
    *
    * @param mdn              phone number (MD5, uppercase)
    * @param source_county_id tourist's home county
    * @param d_province_id    destination province code
    * @param d_stay_time      total stay time in the province
    * @param d_max_distance   farthest travel distance from home on this trip
    */
  case class Province(mdn: String, source_county_id: String, d_province_id: String, d_stay_time: Double, d_max_distance: Double)

}
