package com.shujia.tour

import com.shujia.spark.SparkTool
import com.shujia.util.IndexToHbase
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{HConnectionManager, Put}
import org.apache.spark.sql.hive.HiveContext

object TourProvinceIndex extends SparkTool {

  // Month partition of the dim.dim_usertag_msk_m table joined below.
  // TODO(review): hard-coded — should presumably be derived from day_id; confirm.
  private val TagMonthId = "201805"

  /**
    * Business logic: compute per-province daily tourist indexes and load
    * them into the HBase table "province_index".
    *
    * Two stages:
    *   1. Daily visitor flow per province, written directly from each
    *      partition via an HBase connection created on the executor.
    *   2. A tourists x user-tag join, cached and registered as a temp
    *      table, from which one index per dimension field is loaded via
    *      [[IndexToHbase.toHbase]].
    */
  override def run(): Unit = {

    val day = day_id

    val hiveContext = new HiveContext(sc)

    // 1. Daily visitor flow per province: [province id, visitor count].
    val flowDF = hiveContext.sql(
      s"""
         |select d_province_id,count(1) as flow from
         |dal_tour.dal_tour_province_tourist_msk_d
         |where day_id=$day
         |group by d_province_id
         |
      """.stripMargin)

    flowDF.foreachPartition(rows => {
      // One HBase connection per partition, created on the executor
      // (the connection is not serializable, so it cannot be built on
      // the driver and captured by the closure).
      val configuration: Configuration = new Configuration()
      configuration.set("hbase.zookeeper.quorum", Constants.HBASE_ZOOKEEPER)
      val connection = HConnectionManager.createConnection(configuration)
      val table = connection.getTable("province_index")
      try {
        rows.foreach(row => {
          val d_province_id = row.getAs[String]("d_province_id")
          val flow = row.getAs[Long]("flow")

          // Row key: <province id>_<day>; one cell info:flow per province/day.
          val rowkey = d_province_id + "_" + day

          val put = new Put(rowkey.getBytes())
          put.add("info".getBytes(), "flow".getBytes(), flow.toString.getBytes())

          table.put(put)
        })
      } finally {
        // Always release HBase resources, even when a write throws.
        // The original code never closed the table handle and leaked the
        // connection whenever table.put failed.
        table.close()
        connection.close()
      }
    })

    /**
      * Tourist indexes computed per day:
      *   visitor flow          [province id, count]
      *   gender                [province id, gender, count]
      *   age                   [province id, age, count]
      *   residence             [province id, residence city, count]
      *   home location         [province id, home city, count]
      *   terminal model        [province id, terminal model, count]
      *   spending level        [province id, spending level, count]
      *   stay duration         [province id, stay duration, count]
      *   travel distance       [province id, travel distance, count]
      */

    // Join tourists with the user-tag dimension once, cache the result, and
    // reuse the cached temp table for every per-field aggregation below.
    // cache() returns this.type, so no var/reassignment is needed.
    val joinDF = hiveContext.sql(
      s"""
         |
         |select *  from
         |dal_tour.dal_tour_province_tourist_msk_d as a
         |join
         |dim.dim_usertag_msk_m as b
         |on a.mdn=b.mdn
         |where a.day_id=$day and b.month_id=$TagMonthId
         |
         |
      """.stripMargin).cache()

    joinDF.registerTempTable("table")

    // One HBase load per user-tag dimension field.
    val fields = "gender,age,resi_county_id,number_attr,trmnl_brand,d_stay_time,d_max_distance,conpot,packg"

    fields.split(",").foreach { field =>
      IndexToHbase.toHbase(hiveContext, day, field, "province_index")
    }
  }

  /**
    * Initialization: set Spark runtime parameters before the job starts.
    */
  override def init(): Unit = {
    // Per-day volumes are modest; a small shuffle partition count avoids
    // producing many tiny tasks/files.
    conf.set("spark.sql.shuffle.partitions", "10")
  }
}
