package hivetohbase_scala

import hivetohbase_scala.HbaseSpark.nullHandle
import org.apache.hadoop.hbase.{KeyValue, TableName}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.StructField

import scala.collection.mutable.ArrayBuffer

object ReadHive2Hbase {

  /**
   * Reads rows from a Hive warehouse table and bulk-writes them into an HBase table.
   *
   * @param dbName            Hive database name to read from
   * @param sql               query string passed to the Hive reader (empty string for "all rows" —
   *                          exact semantics depend on HiveSpark.getColumnsInfo; verify)
   * @param hBaseColumnFamily HBase column family the Hive columns are written under
   */
  def processHive2Hbase(dbName: String, sql: String, hBaseColumnFamily: String): Unit = {
    // NOTE(review): connection endpoints and the target table are hard-coded;
    // consider externalizing them to configuration.
    val hadoopUrl = "hdfs://192.168.1.171:9000"
    val hiveUrl = "hdfs://192.168.1.171:9000/user/hive/warehouse"
    val hbaseUrl = "192.168.1.171:2181"
    val sysUser = "work"
    val hBaseTableName = "hive_hbase_1:bol_dw_main_material_place_order"
    val tmpDir = "/tmp/test-hbase" // temporary staging directory on HDFS for the bulk load

    /** *********************************hive *********************************************/
    // Fetch the Hive data.
    // FIX: pass the dbName/sql parameters through instead of the previously
    // hard-coded ("test_hive", "") values, and remove the local
    // `val hBaseColumnFamily = "cf1"` that shadowed the method parameter —
    // the caller's column-family argument was silently ignored.
    val res: DataFrame = HiveSpark.getColumnsInfo(dbName, sql, hiveUrl, sysUser)
    res.show()
    // Transform the Hive rows into (rowkey, KeyValue) pairs for the HBase bulk load.
    val resRdd: RDD[(ImmutableBytesWritable, KeyValue)] = HiveSpark.processHiveInfo(res, hBaseColumnFamily)

    // Resolve the target HBase table name.
    val tableName: TableName = TableName.valueOf(hBaseTableName)
    // Write the data into HBase via the HDFS staging directory.
    HbaseSpark.write2Hbase(hbaseUrl, resRdd, hBaseColumnFamily, tableName, tmpDir, hadoopUrl, sysUser)

    // NOTE(review): this opens a Hive connection only to close it immediately —
    // presumably intended to release resources held by the helpers above; confirm
    // against HiveUtils before relying on it.
    HiveUtils.hiveConnect(hiveUrl, sysUser).close()
  }

  def main(args: Array[String]): Unit = {
    // Intentionally empty entry point; call processHive2Hbase explicitly when wiring up a job.
  }

}
