package com.gitee.dufafei.spark.connector.hbase

import org.apache.hadoop.hbase.client.Connection
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HTableDescriptor, TableName}

import scala.util.Random

/**
 * Operations for HBase pre-split ("partitioned") tables.
 *
 * Rows are spread across regions by prepending a numeric prefix
 * ("0|", "1|", ..., "(n-1)|") to the row key; the table is created with
 * split keys "1|" .. "(n-1)|" so each prefix maps to its own region.
 *
 * @param client helper wrapping HBase admin operations
 */
class HBasePart(client: HBaseClient) {

  /**
   * Creates a pre-split table with `partitionNum` regions.
   *
   * Split keys are "1|", "2|", ..., "(partitionNum-1)|", matching the
   * prefixes produced by [[getPartitionRowKey]]. For load balancing, the
   * partition count is best set to a multiple of the region-server count.
   *
   * @param connection   HBase connection — currently UNUSED (admin access goes
   *                     through the constructor's `client`); kept in the
   *                     signature for caller compatibility
   * @param tableName    name of the table to create
   * @param families     column families to add to the table
   * @param partitionNum number of partitions (regions); must be >= 1
   * @throws IllegalArgumentException if partitionNum < 1
   */
  def createPartitionTable(connection: Connection,
                           tableName: String,
                           families: Array[String],
                           partitionNum: Int): Unit = {
    // Guard early: a value < 1 would otherwise surface later as a
    // NegativeArraySizeException with no hint of the real cause.
    require(partitionNum >= 1, s"partitionNum must be >= 1, got $partitionNum")
    val tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName))
    client.addFamilies(tableDescriptor, families)
    // Split keys "1|" .. "(partitionNum-1)|"; a row prefixed "k|" lands in region k.
    val splitKeys = Array.tabulate(partitionNum - 1)(i => Bytes.toBytes(s"${i + 1}|"))
    client.usingAdmin(admin => admin.createTable(tableDescriptor, splitKeys))
  }

  /**
   * Prefixes a row key with a random partition number so that writes are
   * distributed evenly across the table's pre-split regions.
   *
   * @param rowKey       original row key
   * @param partitionNum number of partitions; must be >= 1
   * @return "p|rowKey" where p is uniform in [0, partitionNum)
   * @throws IllegalArgumentException if partitionNum < 1
   */
  def getPartitionRowKey(rowKey: String, partitionNum: Int): String = {
    // Random.nextInt(n) throws for n <= 0; fail with an explicit message instead.
    require(partitionNum >= 1, s"partitionNum must be >= 1, got $partitionNum")
    s"${Random.nextInt(partitionNum)}|$rowKey"
  }
}
