package com.shujia.util

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{HConnectionManager, Put}
import org.apache.spark.sql.hive.HiveContext

object IndexToHbase {

  /**
   * Aggregates `count(1)` per (province, `clume` value) from the Hive table
   * `join_table`, folds all per-column counts of a province into one
   * pipe-delimited string, and writes one cell per province/day into the
   * HBase table `province_index` (column family `info`).
   *
   * Row key layout: `<d_province_id>_<day_id>`; qualifier: `clume`;
   * value: `"v1:c1|v2:c2|..."`.
   *
   * NOTE(review): `clume` is interpolated directly into the SQL text and the
   * HBase qualifier — callers must pass a trusted column name.
   *
   * @param hiveContext active HiveContext used to run the aggregation query
   * @param day_id      day partition identifier appended to the row key
   * @param clume       Hive column name to group/count by (also the HBase qualifier)
   */
  def toHbase(hiveContext: HiveContext, day_id: String, clume: String): Unit = {
    hiveContext.sql(
      s"""
         |
         |select
         |    a.d_province_id,a.$clume,count(1) as c
         |from
         |    join_table as a
         |group by
         |    a.d_province_id,a.$clume
         |
      """.stripMargin)
      .map(row => {
        val d_province_id = row.getAs[String]("d_province_id")
        val c = row.getAs[String](clume)
        val count = row.getAs[Long]("c")
        (d_province_id, c + ":" + count)
      })
      .reduceByKey(_ + "|" + _) // concatenate the counts of all column values for a province into one string

      .foreachPartition(iter => {
        // One HBase connection per partition: connections are not serializable,
        // so they must be created on the executor side.
        val conf: Configuration = new Configuration()
        conf.set("hbase.zookeeper.quorum", Config.getString("hbase.zookeeper.quorum"))
        val connection = HConnectionManager.createConnection(conf)
        try {
          // Target table must already exist: create 'province_index','info'
          val table = connection.getTable("province_index")
          try {
            iter.foreach(kv => {
              val d_province_id = kv._1
              val count = kv._2

              // Row key combines province id and day so each province gets one row per day.
              val rowkey = d_province_id + "_" + day_id
              val put = new Put(rowkey.getBytes())
              put.add("info".getBytes, clume.getBytes, count.getBytes())
              table.put(put)
            })
          } finally {
            // Close the table handle: flushes any client-side write buffer
            // and releases the handle even if a put failed.
            table.close()
          }
        } finally {
          // Always release the connection, even when writing throws.
          connection.close()
        }
      })
  }

}
