package com.shujia.tour

import com.shujia.utils.{Config, SparkTool}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{HConnectionManager, Put}
import org.apache.spark.sql.hive.HiveContext

/**
  *
  * 需求矩阵  需要计算的指标
  *
  * 根据市游客表计算如下指标
  *
  * 客流量按天 [市id,客流量]
  * 性别按天 [市id,性别,客流量]
  * 年龄按天 [市id,年龄,客流量]
  * 常住地按天 [市id,常住地市,客流量]
  * 归属地按天 [市id,归属地市,客流量]
  * 终端型号按天 [市id,终端型号,客流量]
  * 消费等级按天 [市id,消费等级,客流量]
  * 停留时长按天 [市id,停留时长,客流量]
  * 出游距离按天 [市id,出游距离,客流量]
  *
  *
  * 查询需求：
  * 1、查询某个省某一天的数据
  *
  * 存hbase  : rowkey怎么设计，表怎么设计
  *
  *
  * 1、表设计
  * 省和市分别一张表
  *
  * create 'city_index','info'
  *
  *
  * 2、rowkey设计
  * 市编号+时间
  *
  * 3、列设计
  * 每一个列保存一种指标
  *
  */

object CityTouristIndexJob extends SparkTool {

  /**
    * Entry point required by SparkTool.
    *
    * Computes the per-city tourist metrics for one day partition and writes
    * them to HBase: column family "info", rowkey = cityId + "_" + day_id,
    * one qualifier per metric ("flow", "gender_flow", "age_flow", ...).
    *
    * @param args args(0) must be the day partition to process (day_id)
    */
  override def run(args: Array[String]): Unit = {
    if (args.isEmpty) {
      log.error("请指定时间参数：")
      // Fail fast: every query below is partitioned by day_id.
      throw new RuntimeException("请指定时间参数：day_id")
    }
    val day_id = args(0)
    log.info(s"当前处理的时间分区为：$day_id")

    val cityTableName = Config.get("city.tourist.table.name")
    val hbaseTableName = Config.get("city.tourist.index.hbase.table.name")
    val usertagTableName = Config.get("usertag.table.name")

    // Read the warehouse tables through HiveContext.
    val hive = new HiveContext(sc)

    // 1. Total flow per day: [city id, flow]
    // NOTE(review): day_id is now quoted, consistent with the join query
    // below which treats it as a string partition column — confirm the
    // partition column type.
    val df = hive.sql(
      s"""
         |select d_city_id,count(1) as c
         |from $cityTableName
         |where day_id='$day_id'
         |group by d_city_id
         |
      """.stripMargin)

    // One HBase connection per partition: connections are not serializable,
    // so they must be created inside the closure, on the executor.
    df.foreachPartition(rows => {
      val configuration: Configuration = new Configuration
      configuration.set("hbase.zookeeper.quorum", Config.get("hbase.zookeeper.quorum"))
      val connection = HConnectionManager.createConnection(configuration)
      val table = connection.getTable(hbaseTableName)
      try {
        rows.foreach(row => {
          val dCityId = row.getAs[String]("d_city_id")
          val flow = row.getAs[Long]("c")

          // rowkey design: city id + day partition.
          val rowkey = dCityId + "_" + day_id
          val put = new Put(rowkey.getBytes())
          put.add("info".getBytes(), "flow".getBytes(), flow.toString.getBytes())
          table.put(put)
        })
      } finally {
        // Release the table handle and the zookeeper connection even if a
        // write fails; table must be closed before its connection.
        table.close()
        connection.close()
      }
    })

    // All remaining metrics need the same city-tourist x usertag join, so
    // compute it once and cache it for the eight aggregations below.
    // NOTE(review): the usertag month '201805' is hard-coded — confirm
    // whether it should be derived from day_id.
    val joinDF = hive.sql(
      s"""
         |select *
         |from $cityTableName as a join $usertagTableName as b
         |on a.mdn=b.mdn
         |where a.day_id='$day_id' and b.month_id='201805'
         |
      """.stripMargin).cache()

    joinDF.registerTempTable("join_table")

    // 2..9: one metric per dimension column: [city id, dimension, flow].
    val dimensions = Seq(
      "gender",         // 性别按天
      "age",            // 年龄按天
      "resi_county_id", // 常住地按天
      "number_attr",    // 归属地按天
      "trmnl_brand",    // 终端型号按天
      "conpot",         // 消费等级按天
      "d_stay_time",    // 停留时长按天
      "d_max_distance"  // 出游距离按天
    )
    dimensions.foreach(indexToHbase(hive, hbaseTableName, day_id, _))

    // The cached join is no longer needed once every metric is written.
    joinDF.unpersist()
  }

  /**
    * Aggregates the flow per (city, column value) from the cached temp
    * table "join_table" and writes one HBase cell per city:
    * rowkey = cityId + "_" + day_id, qualifier = "<column>_flow",
    * value = "v1:n1|v2:n2|..." — one value:count pair per distinct value.
    *
    * @param hive           HiveContext that registered "join_table"
    * @param hbaseTableName target HBase table
    * @param day_id         day partition, suffix of the rowkey
    * @param column         grouping column; also names the HBase qualifier
    */
  def indexToHbase(hive: HiveContext, hbaseTableName: String, day_id: String, column: String): Unit = {

    // Flow per (city, column value).
    val columnFlow = hive.sql(
      s"""
         |select d_city_id , $column, count(*) as c
         |from join_table
         |group by d_city_id , $column
         |
      """.stripMargin)

    // Collapse every value of one city into a single "value:count|..."
    // string. NOTE(review): pair order inside the string is not
    // deterministic — reduceByKey gives no ordering guarantee.
    val resultRDD = columnFlow.map(row => {
      val cityId = row.getAs[String]("d_city_id")
      val value = row.getAs[Any](column)
      val flow = row.getAs[Long]("c")
      (cityId, s"$value:$flow")
    }).reduceByKey((x, y) => x + "|" + y)

    resultRDD.foreachPartition(lines => {
      // One HBase connection per partition (not serializable; must be
      // created on the executor).
      val configuration: Configuration = new Configuration
      configuration.set("hbase.zookeeper.quorum", Config.get("hbase.zookeeper.quorum"))
      val connection = HConnectionManager.createConnection(configuration)
      val table = connection.getTable(hbaseTableName)
      try {
        lines.foreach { case (dCityId, value) =>
          val rowkey = dCityId + "_" + day_id
          val put = new Put(rowkey.getBytes())
          put.add("info".getBytes(), s"${column}_flow".getBytes(), value.getBytes())
          table.put(put)
        }
      } finally {
        // Close table before connection, even when a write failed.
        table.close()
        connection.close()
      }
    })
  }

  /**
    * Spark configuration hook: conf.set(key, value).
    * Eight small aggregations only need a few shuffle partitions.
    */
  override def init(): Unit = {
    conf.set("spark.sql.shuffle.partitions", "10")
  }
}
