package com.gitee.dufafei.spark.connector.hbase

import com.google.protobuf.Message
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.client.coprocessor.{AggregationClient, LongColumnInterpreter}
import org.apache.hadoop.hbase.coprocessor.AggregateImplementation

/**
 * Coprocessor operations for HBase tables.
 *
 * Wraps table-level coprocessor registration and the aggregation
 * coprocessor client (server-side row counting via [[AggregationClient]]).
 */
class HBaseCop(client: HBaseClient) {

  // Brings implicit String -> TableName (etc.) conversions into scope for
  // the admin/aggregation calls below.
  import HBaseClient.implicits._

  /**
   * Registers the built-in [[AggregateImplementation]] coprocessor on a table,
   * which is required before [[rowCount]] can be used against it.
   *
   * Idempotent: registering twice is a no-op (see [[addCoprocessor]]).
   *
   * @param tableName table name
   */
  def addAggregate(tableName: String): Unit = {
    val className = classOf[
      AggregateImplementation[_, _, _ <: Message, _ <: Message, _ <: Message]
    ].getName
    addCoprocessor(tableName, className)
  }

  /**
   * Adds a coprocessor to a table.
   *
   * The table must be disabled while its descriptor is modified; it is
   * re-enabled in a `finally` block so that a failure during modification
   * does not leave the table offline (the previous implementation had no
   * such guard). The call is also idempotent: if the coprocessor is already
   * registered, nothing is changed — `HTableDescriptor.addCoprocessor`
   * would otherwise throw an IOException for a duplicate registration.
   *
   * @param tableName table name
   * @param className fully-qualified coprocessor class name
   */
  def addCoprocessor(tableName: String, className: String): Unit = {
    client.usingAdmin { admin =>
      val htd = admin.getTableDescriptor(tableName)
      if (!htd.hasCoprocessor(className)) {
        admin.disableTable(tableName)
        try {
          htd.addCoprocessor(className)
          admin.modifyTable(tableName, htd)
        } finally {
          // Always bring the table back online, even when the descriptor
          // update failed; the original exception still propagates.
          admin.enableTable(tableName)
        }
      }
    }
  }

  /**
   * Runs `execute` against a fresh [[AggregationClient]] built from this
   * client's configuration; `client.using` is responsible for closing it.
   */
  private def usingAggregate[A](execute: AggregationClient => A): A = {
    val agg = new AggregationClient(client.conn.getConfiguration)
    client.using(agg)(execute)
  }

  /**
   * Counts the rows of a table server-side via the aggregation coprocessor.
   *
   * Requires [[addAggregate]] to have been applied to the table first.
   * NOTE(review): `AggregationClient.rowCount` expects the scan to target a
   * single column family — confirm against callers' Scan construction.
   *
   * @param tableName table name
   * @param scan scan restricting the rows to count
   * @return total number of matching rows
   */
  def rowCount(tableName: String, scan: Scan): Long = {
    usingAggregate { ac =>
      val interpreter = new LongColumnInterpreter
      ac.rowCount(tableName, interpreter, scan)
    }
  }

  /**
   * Counts all rows of a table (full-table scan via the coprocessor).
   *
   * @param tableName table name
   * @return total number of rows
   */
  def rowCount(tableName: String): Long = rowCount(tableName, new Scan())
}
