package com.shujia.tour

import com.shujia.util.{Config, IndexToHbase, SparkTool}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{HConnectionManager, Put}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.sql.hive.HiveContext

object MakeProvinceIndexApp extends SparkTool {

  /**
    * Spark business logic: build daily tourist-flow indices per province
    * and write them to HBase.
    *
    * Indices produced (all per day):
    *   - total flow            [province_id, flow]
    *   - gender                [province_id, gender, flow]
    *   - age                   [province_id, age, flow]
    *   - residence county      [province_id, resi_county_id, flow]
    *   - number attribution    [province_id, number_attr, flow]
    *   - terminal brand        [province_id, trmnl_brand, flow]
    *   - consumption potential [province_id, conpot, flow]
    *   - stay time             [province_id, d_stay_time, flow]
    */
  override def run(args: Array[String]): Unit = {

    val day = day_id

    // HiveContext only returns data when the job runs on the cluster.
    val hiveContext = new HiveContext(sc)

    val provinceTable = "dal_tour.dal_tour_province_tourist_msk_d"

    // Index 1: total daily flow per province -> HBase table "province_index",
    // rowkey = "<provinceId>_<day>", column info:flow.
    hiveContext.sql(
      s"""
         |select
         |   d_province_id,count(1) as c
         |from
         |   $provinceTable
         |where
         |   day_id='$day'
         |group by
         |   d_province_id
         |
         |
      """.stripMargin)
      .foreachPartition(iter => {
        // One HBase connection per partition; released in finally below.
        val conf: Configuration = new Configuration()
        conf.set("hbase.zookeeper.quorum", Config.getString("hbase.zookeeper.quorum"))
        val connection = HConnectionManager.createConnection(conf)

        // Table handle for writes.
        val table = connection.getTable("province_index")
        try {
          iter.foreach(row => {
            val d_province_id = row.getAs[String]("d_province_id")
            val count = row.getAs[Long]("c")

            // rowkey = provinceId + day so point lookups by province/day are direct.
            val rowkey = d_province_id + "_" + day
            val put = new Put(rowkey.getBytes())
            put.add("info".getBytes, "flow".getBytes, Bytes.toBytes(count))
            table.put(put)
          })
        } finally {
          // FIX: the table handle was never closed, and the connection leaked
          // whenever a put failed — release both unconditionally.
          table.close()
          connection.close()
        }
      })

    // Indices 2-8: join daily province records with the monthly user-tag
    // dimension, then fan out one HBase load per tag column.
    // NOTE(review): month_id is hard-coded to '201805' — presumably it should
    // be derived from day_id; confirm with the data owner before changing.
    val joinDF = hiveContext.sql(
      s"""
         |select
         |    *
         |from
         |   $provinceTable as a
         |join
         |   dim.dim_usertag_msk_m as b
         |on
         |   a.mdn=b.mdn
         |where
         |   b.month_id='201805' and a.day_id='$day'
         |
      """.stripMargin)

    // Cache: the joined table is re-scanned once per index below.
    joinDF.cache().registerTempTable("join_table")

    IndexToHbase.toHbase(hiveContext, day_id, "gender")
    IndexToHbase.toHbase(hiveContext, day_id, "age")
    IndexToHbase.toHbase(hiveContext, day_id, "resi_county_id")
    IndexToHbase.toHbase(hiveContext, day_id, "number_attr")
    IndexToHbase.toHbase(hiveContext, day_id, "trmnl_brand")
    IndexToHbase.toHbase(hiveContext, day_id, "conpot")
    IndexToHbase.toHbase(hiveContext, day_id, "d_stay_time")
  }

  /**
    * Initialization hook: set Spark runtime parameters before the context
    * is created (e.g. conf.setMaster("local") for local testing).
    */
  override def init(): Unit = {
    // Per-day province aggregates are small; 10 shuffle partitions suffice.
    conf.set("spark.sql.shuffle.partitions", "10")
  }
}
