package com.atguigu.core

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}


/**
 * Example: writing a Spark RDD into an HBase table through the Hadoop
 * `TableOutputFormat` (new MapReduce API).
 *
 * Each RDD element `(rowkey, name, rank)` is converted into an HBase `Put`
 * for table "bigfactory", column family "info", columns "name" and "rank",
 * then persisted with `saveAsNewAPIHadoopDataset`.
 *
 * Requires a reachable ZooKeeper quorum (hadoop102-104) backing the HBase
 * cluster; the target table and column family must already exist.
 */
object RDD2Hbase {
  def main(args: Array[String]): Unit = {

    // Initialize the SparkContext (local mode, all cores).
    val conf: SparkConf = new SparkConf().setAppName("RDD2Hbase").setMaster("local[*]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("ERROR")

    // Reading data from an HBase table (kept for reference):
    /*val configuration: Configuration = HBaseConfiguration.create()
    configuration.set("hbase.zookeeper.quorum", "hadoop102,hadoop103,hadoop104")
    configuration.set(TableInputFormat.INPUT_TABLE, "bigfactory")

    val rdd: RDD[(ImmutableBytesWritable, Result)] = sc.newAPIHadoopRDD(configuration, classOf[TableInputFormat], classOf[ImmutableBytesWritable], classOf[Result])


    rdd.foreach(x => {
      val cells: Array[Cell] = x._2.rawCells()

      cells.foreach(cell => {
        val rowkey: String = Bytes.toString(CellUtil.cloneRow(cell))
        val family: String = Bytes.toString(CellUtil.cloneFamily(cell))
        val column: String = Bytes.toString(CellUtil.cloneQualifier(cell))
        val value: String = Bytes.toString(CellUtil.cloneValue(cell))

        println(s"==$rowkey===$family=====$column======$value==")
      })

    })*/


    // Sample data to write: (rowkey, name, rank).
    val rdd: RDD[(String, String, String)] = sc.makeRDD(List(("9", "sss", "9")))

    // Map each tuple to the (key, Put) pair shape TableOutputFormat expects.
    val rdd2: RDD[(ImmutableBytesWritable, Put)] = rdd.map(x => {
      val put = new Put(Bytes.toBytes(x._1))
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(x._2))
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("rank"), Bytes.toBytes(x._3))
      // The key is ignored by TableOutputFormat (the Put carries the rowkey),
      // so an empty ImmutableBytesWritable is sufficient.
      (new ImmutableBytesWritable(), put)
    })

    // Build the HBase connection / output configuration.
    val configuration: Configuration = HBaseConfiguration.create()
    configuration.set("hbase.zookeeper.quorum", "hadoop102,hadoop103,hadoop104")
    configuration.set(TableOutputFormat.OUTPUT_TABLE, "bigfactory")

    // Configure the output format on a MapReduce Job object.
    val job: Job = Job.getInstance(configuration)
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // FIX: the output VALUE is the Put mutation we emit, not Result.
    // Result is a read-side type; TableOutputFormat writes Mutations (Put/Delete).
    job.setOutputValueClass(classOf[Put])

    rdd2.saveAsNewAPIHadoopDataset(job.getConfiguration)

    sc.stop()
  }

}
