package com.shujia.util

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{HConnectionManager, Put}
import org.apache.spark.sql.hive.HiveContext

object IndexToHbase {

  /**
    * Aggregates a metric from Hive and writes the result into HBase.
    *
    * Runs `select d_province_id, $field, count(1) ... group by d_province_id, $field`,
    * concatenates per-province values as "value:count" joined with "|", and stores
    * one cell per province under rowkey "<d_province_id>_<day_id>",
    * column family "info", qualifier "flow_<field>".
    *
    * @param hiveContext Hive context used to run the aggregation query
    * @param day_id      day partition id, appended to the rowkey
    * @param field       dimension column to group by (e.g. gender)
    * @param hbaseTable  target HBase table name
    */
  def toHbase(hiveContext: HiveContext, day_id: String, field: String, hbaseTable: String): Unit = {
    // e.g. gender per day: [province id, gender, passenger flow]

    hiveContext.sql(
      s"""
         |
         |select d_province_id,$field,count(1) as flow from
         |table
         |group by d_province_id,$field
         |
         |
      """.stripMargin)
      .map(row => {
        val provinceId = row.getAs[String]("d_province_id")
        val fieldValue = row.getAs[Object](field)
        val flow = row.getAs[Long]("flow")

        // key = province, value = "<fieldValue>:<count>"
        (provinceId, fieldValue + ":" + flow)
      })
      .reduceByKey(_ + "|" + _) // join all field values of a province into one string
      .foreachPartition(lines => {
        // One HBase connection per partition; executors cannot share a driver-side connection.
        val configuration: Configuration = new Configuration()
        configuration.set("hbase.zookeeper.quorum", "node2:2181,node3:2181,node4:2181")
        val connection = HConnectionManager.createConnection(configuration)
        try {
          val table = connection.getTable(hbaseTable)
          try {
            lines.foreach { case (provinceId, data) =>
              val rowkey = provinceId + "_" + day_id

              val put = new Put(rowkey.getBytes())
              put.add("info".getBytes(), s"flow_$field".getBytes(), data.getBytes())

              table.put(put)
            }
          } finally {
            // Close the table even if a put fails, so its buffers are released.
            table.close()
          }
        } finally {
          // Always release the connection; previously a failure mid-write leaked it.
          connection.close()
        }
      })
  }
}
