package com.gxlevi.batch_process.util

import com.alibaba.fastjson.JSONObject
import org.apache.flink.api.java.tuple.Tuple2
import org.apache.hadoop.hbase.client.{Result, Scan}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{Cell, CellUtil}

/**
 * Utility class integrating Flink with HBase.
 *
 * Extends MyTableInputFormat[T <: Tuple] with a concrete tuple type:
 * Tuple2 where f0 holds the rowkey and f1 holds a JSON string of
 * column-name -> column-value pairs.
 *
 * Implemented abstract methods:
 *  - getScanner: assigns the parent's `scan` field and returns it
 *  - getTableName: returns the table name supplied via the constructor
 *  - mapResultToTuple: converts an HBase Result into the output Tuple2
 */
class HBaseTableInputFormat(var tableName: String) extends MyTableInputFormat[Tuple2[String, String]] {

  /** Initializes the parent's scan with a default full-table Scan and returns it. */
  override def getScanner: Scan = {
    scan = new Scan()
    scan
  }

  /** Name of the HBase table to read (passed in through the constructor). */
  override def getTableName: String = tableName

  /**
   * Converts one HBase Result into Tuple2(rowkey, jsonString).
   *
   * Steps: read the rowkey, iterate the raw cells, collect each
   * qualifier -> value pair into a JSONObject, then build the tuple.
   *
   * NOTE(review): cells are keyed by qualifier only (column family is
   * dropped), so identical qualifiers in different families would
   * overwrite each other — confirm this is acceptable for the schema.
   */
  override def mapResultToTuple(result: Result): Tuple2[String, String] = {
    val rowKey: String = Bytes.toString(result.getRow)
    val json = new JSONObject()

    result.rawCells().foreach { cell =>
      val qualifier: String = Bytes.toString(CellUtil.cloneQualifier(cell))
      val value: String = Bytes.toString(CellUtil.cloneValue(cell))
      json.put(qualifier, value)
    }

    new Tuple2[String, String](rowKey, json.toString)
  }
}
