package hivetohbase_scala

import java.net.URI
import java.sql.Timestamp

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, KeyValue, TableName}
import org.apache.hadoop.hbase.client.{Admin, Connection, RegionLocator, Table}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD


object HbaseSpark {

  /** Normalizes a possibly-null or empty string to the literal string "NULL".
   *
   * @param str input value, may be null
   * @return "NULL" when str is null or empty, otherwise str unchanged
   */
  def nullHandle(str: String): String =
    if (str == null || str.isEmpty) "NULL" else str


  /** *********************************hbase *********************************************/

  /** Bulk-loads an RDD of (rowkey, KeyValue) pairs into an HBase table via HFiles.
   *
   * Steps: (1) prepare the HBase connection and clean the temporary HFile directory,
   * (2) create the target table if it does not yet exist, (3) write HFiles to tmpDir
   * with saveAsNewAPIHadoopFile, (4) bulk-load the HFiles with LoadIncrementalHFiles.
   *
   * @param hbaseUrl              connection string consumed by HbaseUtils.hbaseConf
   * @param resRdd                rows already encoded as (ImmutableBytesWritable, KeyValue);
   *                              assumes they are sorted as HFileOutputFormat2 requires — TODO confirm at caller
   * @param hBaseColumnFamilyName column family used when the table has to be created
   * @param tableName             target HBase table
   * @param tmpDir                HDFS directory for intermediate HFiles (must not pre-exist)
   * @param hadoopUrl             HDFS URI used to obtain the FileSystem
   * @param sysUser               user the FileSystem operations run as
   */
  def write2Hbase(hbaseUrl: String,
                  resRdd: RDD[(ImmutableBytesWritable, KeyValue)],
                  hBaseColumnFamilyName: String,
                  tableName: TableName,
                  tmpDir: String,
                  hadoopUrl: String,
                  sysUser: String
                 ) = {

    // 1. Prepare the environment.
    val hbaseConf: Configuration = HbaseUtils.hbaseConf(hbaseUrl)
    val hbaseConn: Connection = HbaseUtils.hbaseConnect(hbaseConf)
    val admin: Admin = hbaseConn.getAdmin

    try {
      val hconf = new Configuration()
      // NOTE(review): hadoopUrl looks like an HDFS URI; confirm it is really the
      // intended value for "hbase.master" (kept as-is to preserve behavior).
      hconf.set("hbase.master", hadoopUrl)

      // The directory HFiles are generated into must not exist, so delete any
      // leftovers from a previous run.
      val fs: FileSystem = FileSystem.get(new URI(hadoopUrl), hconf, sysUser)
      val outPath = new Path(tmpDir)
      if (fs.exists(outPath)) {
        println("删除临时文件夹")
        fs.delete(outPath, true)
      }

      // Create the target HBase table up front if it is missing. (Previously this
      // happened AFTER HFile generation, so on a first run the output job was
      // configured against a non-existent table.)
      if (!admin.tableExists(tableName)) {
        val hTableDesc = new HTableDescriptor(tableName)
        hTableDesc.addFamily(new HColumnDescriptor(hBaseColumnFamilyName))
        admin.createTable(hTableDesc)
      }

      // 2. Save HFiles on HDFS.
      val table: Table = hbaseConn.getTable(tableName)
      // Region distribution of the table, needed by doBulkLoad.
      val regionLocator: RegionLocator = hbaseConn.getRegionLocator(tableName)
      try {
        val job = Job.getInstance(hbaseConf)
        job.setJobName("toHbase")

        // HFile generation requires exactly these map output types.
        job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
        job.setMapOutputValueClass(classOf[KeyValue])

        // Configure HFileOutputFormat2 for this table.
        //        HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator)
        HFileOutputFormat2.configureIncrementalLoadMap(job, table)

        // Raise the per-region/per-family HFile cap so large loads are not rejected.
        hbaseConf.setInt("hbase.mapreduce.bulkload.max.hfiles.perRegion.perFamily", 5000)

        // After this completes, the HFiles exist under tmpDir.
        resRdd.saveAsNewAPIHadoopFile(tmpDir,
          classOf[ImmutableBytesWritable],
          classOf[KeyValue],
          classOf[HFileOutputFormat2],
          hbaseConf)

        // 3. Bulk-load the generated HFiles into HBase.
        val load: LoadIncrementalHFiles = new LoadIncrementalHFiles(hbaseConf)
        load.doBulkLoad(outPath, admin, table, regionLocator)
      } finally {
        // Close per-table handles even if the job or bulk load fails.
        regionLocator.close()
        table.close()
      }

      println("ok")
    } finally {
      // Always release the admin handle and the connection (previously `admin`
      // was never closed, and nothing was closed on failure).
      admin.close()
      hbaseConn.close()
    }
  }

}
