package com.shujia.tour

import com.shujia.redis.IndexToRedis
import com.shujia.spark.SparkTool
import com.shujia.util.{Geography, SSXRelation}
import org.apache.spark.sql.SaveMode
import redis.clients.jedis.Jedis

/**
  * 省游客指标统计
  *
  * 客流量按天 [省id,客流量]
  * 性别按天 [省id,性别,客流量]
  * 年龄按天 [省id,年龄,客流量]
  * 常住地按天 [省id,常住地市,客流量]
  * 归属地按天 [省id,归属地市,客流量]
  * 终端型号按天 [省id,终端型号,客流量]
  * 消费等级按天 [省id,消费等级,客流量]
  * 停留时长按天 [省id,停留时长,客流量]
  *
  */
object DalTourProvinceIndex extends SparkTool {

  /**
    * Spark business logic: join the per-day province tourist table with the
    * user-profile (usertag) table, then compute the daily indicators listed in
    * the file header and write each of them to redis.
    *
    * @param args args(0) is the required day partition id (e.g. "20180503");
    *             the method logs an error and returns early when it is missing.
    */
  override def run(args: Array[String]): Unit = {

    // The day partition must be supplied on the command line.
    if (args.isEmpty) {
      LOGGER.error("请指定参数")
      return
    }

    val dayId = args(0)

    // NOTE(review): the usertag month partition was hard-coded to 201805,
    // which silently joins the wrong profile month for any other day.
    // Derive it from dayId instead — assumes dayId is yyyyMMdd (confirm).
    val monthId = dayId.take(6)

    val usertagPath = Constants.USERTAG_INPUT_PATH + "/month_id=" + monthId
    val provincePath = Constants.PROVINCE_OUTPUT_PATH + Constants.PARTITION_NAME + dayId

    // Read the province tourist table and the user-profile table.
    val usertagDF = sql.read.parquet(usertagPath)
    val provinceDF = sql.read.parquet(provincePath)

    /**
      * Province table columns (for reference):
      * mdn string comment             'phone number, upper-case MD5'
      * ,source_county_id string comment 'tourist source county'
      * ,d_province_id string comment   'destination province code'
      * ,d_stay_time double comment     'hours the tourist stayed in the province'
      * ,d_max_distance double comment  'maximum travel distance of this trip'
      */

    // Join with the user-profile table on mdn. The join result feeds every
    // indicator below, so cache it once and unpersist when done.
    val joinDF = provinceDF.join(usertagDF, "mdn")
    joinDF.cache()

    try {
      /**
        * 1. Daily tourist flow: [province id, flow]
        */
      val flowDF = joinDF.groupBy("d_province_id").count()

      // Write the flow indicator to redis — one connection per partition.
      flowDF.foreachPartition(rows => {
        val jedis = new Jedis("node2", 6379)
        try {
          jedis.select(10)

          rows.foreach(row => {
            val pId = row.getAs[String]("d_province_id")
            val flow = row.getAs[Long]("count")

            // Key is province id + day, e.g. "52_20180503".
            val key = pId + "_" + dayId

            jedis.hset(key, "flow", flow.toString)
          })
        } finally {
          // Always release the connection, even if a row write fails,
          // otherwise the executor leaks redis connections.
          jedis.close()
        }
      })

      /**
        * Remaining daily indicators, each [province id, dimension value, flow].
        * Column-to-dimension mapping below is inferred from the header comment
        * and column names — TODO confirm against the usertag schema.
        */
      IndexToRedis.fit(joinDF, "d_province_id", "gender", dayId)         // gender (性别)
      IndexToRedis.fit(joinDF, "d_province_id", "age", dayId)            // age (年龄)
      IndexToRedis.fit(joinDF, "d_province_id", "number_attr", dayId)    // presumably number home location (归属地)
      IndexToRedis.fit(joinDF, "d_province_id", "trmnl_brand", dayId)    // terminal brand/model (终端型号)
      IndexToRedis.fit(joinDF, "d_province_id", "trmnl_price", dayId)    // presumably terminal price tier
      IndexToRedis.fit(joinDF, "d_province_id", "packg", dayId)          // presumably tariff package
      IndexToRedis.fit(joinDF, "d_province_id", "conpot", dayId)         // presumably consumption level (消费等级)
      IndexToRedis.fit(joinDF, "d_province_id", "resi_county_id", dayId) // residence county (常住地)
    } finally {
      // Release the cached join result so executor memory is freed.
      joinDF.unpersist()
    }
  }

  /**
    * Initialize the spark configuration (shuffle tuning).
    * Uncomment setMaster for local runs.
    */
  override def init(): Unit = {
    //    conf.setMaster("local[8]")
    conf.set("spark.shuffle.file.buffer", "64k")        // shuffle spill write buffer size
    conf.set("spark.reducer.maxSizeInFlight", "96m")    // max data a reducer pulls from map side per fetch
    conf.set("spark.shuffle.io.maxRetries", "10")       // retries when shuffle read fetches from a write node
    conf.set("spark.shuffle.io.retryWait", "60s")       // wait between shuffle fetch retries

    // Tune these two together; the remaining 0.2 is available to task execution.
    conf.set("spark.shuffle.memoryFraction", "0.4")     // fraction of memory for shuffle
    conf.set("spark.storage.memoryFraction", "0.4")     // fraction of memory for RDD persistence
    conf.set("spark.sql.shuffle.partitions", "10")
  }

  /** Row model of the province tourist table (see column comments in run). */
  case class Province(mdn: String, source_county_id: String, d_province_id: String, d_stay_time: Double, d_max_distance: Double)

}
