package com.spark.util.client

import com.spark.util.core.{Borrow, HBaseIOSupport, Logging}
import com.google.protobuf.Message
import com.spark.util.utils.PropertiesUtil
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory, Scan}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, NamespaceDescriptor, TableName}
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter
import org.apache.hadoop.hbase.coprocessor.AggregateImplementation

object HBaseClient extends HBaseIOSupport with Borrow with Logging {

  // Connection settings are resolved once at object initialization from the
  // application properties; a missing key fails fast here.
  override val quorum: String = PropertiesUtil.getString("hbase.zookeeper.quorum")
  override val port: String = PropertiesUtil.getString("hbase.zookeeper.port")
  override val parent: String = PropertiesUtil.getString("hbase.zookeeper.parent")
  // Single shared connection for the whole JVM. HBase Connections are
  // heavyweight and thread-safe, so one instance per process is the
  // recommended usage.
  // NOTE(review): never closed — acceptable for a process-lifetime singleton,
  // but consider a shutdown hook if clean termination matters.
  override val conn: Connection = ConnectionFactory.createConnection(getHBaseConf)

  /** Creates a namespace with the given name. */
  def createNameSpace(name: String): Unit = {
    val descriptor = NamespaceDescriptor.create(name).build()
    using(conn.getAdmin) { admin => admin.createNamespace(descriptor) }
  }

  /** Creates a plain (non-pre-split) table with the given column families. */
  def createTable(tableNameStr: String, families: Array[String]): Unit = {
    val tableDesc = new HTableDescriptor(TableName.valueOf(tableNameStr))
    families.foreach(f => tableDesc.addFamily(new HColumnDescriptor(f)))
    using(conn.getAdmin) { admin => admin.createTable(tableDesc) }
  }

  /**
   * Creates a pre-split table. The partition count should ideally be a
   * multiple of the number of region servers (for load balancing), and row
   * keys are expected to carry the prefix s"$number|".
   *
   * @param partitionNum number of regions; must be >= 2, since N regions
   *                     need N-1 split points
   * @throws IllegalArgumentException if partitionNum < 2
   */
  def createPartitionTable(tableNameStr: String, familyNames: Array[String], partitionNum: Int): Unit = {
    // Fix: previously partitionNum = 0 threw NegativeArraySizeException and
    // partitionNum = 1 passed an empty split-key array into HBase, which
    // fails with a far less helpful error. Fail fast with a clear message.
    require(partitionNum >= 2, s"partitionNum must be >= 2, got $partitionNum")
    val tableDesc = new HTableDescriptor(TableName.valueOf(tableNameStr))
    familyNames.foreach(f => tableDesc.addFamily(new HColumnDescriptor(f)))
    // N regions require N-1 split points: "1|", "2|", ..., s"${N-1}|".
    // NOTE(review): for partitionNum > 10 the keys compare lexicographically
    // ("10|" < "2|"); make sure row-key prefixes follow the same ordering.
    val splitKeys = Array.tabulate(partitionNum - 1)(i => Bytes.toBytes((i + 1) + "|"))
    using(conn.getAdmin) { admin => admin.createTable(tableDesc, splitKeys) }
  }

  /** Disables and then drops the table (HBase requires disable before delete). */
  def deleteTable(tableNameStr: String): Unit = {
    val tableName = TableName.valueOf(tableNameStr)
    using(conn.getAdmin) { admin =>
      admin.disableTable(tableName)
      admin.deleteTable(tableName)
    }
  }

  /**
   * Attaches the aggregation coprocessor (AggregateImplementation) to the
   * table so that server-side aggregations such as [[rowCount]] can run.
   * The table is re-enabled even when the modification fails, so it is
   * never left in the disabled state.
   */
  def addCoprocessor(tableNameStr: String): Unit = {
    using(conn.getAdmin) { admin =>
      val tableName = TableName.valueOf(tableNameStr)
      admin.disableTable(tableName)
      try {
        val htd = admin.getTableDescriptor(tableName)
        htd.addCoprocessor(classOf[AggregateImplementation[_, _, _ <: Message, _ <: Message, _ <: Message]].getName)
        admin.modifyTable(tableName, htd)
      } finally {
        // Fix: previously a failure in getTableDescriptor/modifyTable left
        // the table disabled with no way to recover except manual repair.
        admin.enableTable(tableName)
      }
    }
  }

  /**
   * Counts the rows of a table server-side through the aggregation
   * coprocessor. The coprocessor must already be attached to the table —
   * call [[addCoprocessor]] first.
   *
   * @param scan scan restricting which rows/columns are counted
   * @return the number of matching rows
   */
  def rowCount(tableNameStr: String, scan: Scan): Long = {
    val tableName = TableName.valueOf(tableNameStr)
    using(new AggregationClient(getHBaseConf)) { aggregationClient =>
      aggregationClient.rowCount(tableName, new LongColumnInterpreter, scan)
    }
  }
}
