package day5

import Utils.SparkUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory, Put}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.SparkContext

object Test1_Hbase {

  /**
   * Goal: write the records of a Spark RDD into an HBase table.
   *
   * The implementation is kept commented out because it needs a live
   * HBase/ZooKeeper cluster (hadoop11..hadoop13); `main` is therefore a
   * no-op until the block below is uncommented.
   *
   * Fixes applied to the commented reference code:
   *  - `conn` was typed `Any`, so `conn.getTable(...)` would not have
   *    compiled; it is now typed as `Connection`.
   *  - the `Put` was built but never submitted — `table.put(put)` was
   *    missing (the loop body ended with the bare expression `table`).
   *  - `Table` and `Connection` are now closed in a `finally` block so
   *    they are released even if a write fails.
   *  - `HBaseConfiguration.create()` replaces `new Configuration()` so
   *    the HBase default resources (hbase-site.xml) are loaded.
   */
  def main(args: Array[String]): Unit = {
    /*
    val sc: SparkContext = SparkUtils.getSparkContext(4, "ceshi")
    val rdd1 = sc.makeRDD(List((1,"苹果",20),(2,"香蕉",22),(3,"火龙果",23)))

    // One connection per partition: HBase Connection is not serializable,
    // so it must be created on the executor, never on the driver.
    rdd1.foreachPartition(iter => {
      val conf = HBaseConfiguration.create()
      conf.set("hbase.zookeeper.quorum", "hadoop11,hadoop12,hadoop13")
      val conn: Connection = ConnectionFactory.createConnection(conf)
      val table = conn.getTable(TableName.valueOf("biaoming"))
      try {
        iter.foreach { case (id, name, price) =>
          val put = new Put(Bytes.toBytes(id))
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(name))
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("price"), Bytes.toBytes(price))
          table.put(put) // actually send the mutation to HBase
        }
      } finally {
        table.close()
        conn.close()
      }
    })
    sc.stop()
    */
  }
}
