package com.shujia.onhbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{HConnection, HConnectionManager, Put}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Reads student records (CSV lines: id,name,...) from a text file with Spark
  * and writes them into the HBase table "student1", column family "info".
  *
  * Connection strategy: `foreachPartition` is used instead of `foreach` so that
  * exactly ONE HBase connection is created per partition. Creating a connection
  * inside a per-record `foreach` would open one connection per line; creating it
  * on the driver would not work either, because network connections are not
  * serializable and cannot be shipped to executors.
  */
object SparkToHbase {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("sql").setMaster("local")
    val sc = new SparkContext(conf)

    try {
      val rdd = sc.textFile("spark/data/students.txt")

      rdd.foreachPartition(iter => {

        // One connection per partition (see object-level doc comment).
        val configuration: Configuration = new Configuration()
        configuration.set("hbase.zookeeper.quorum", "node1:2181,node2:2181,node3:2181")
        val connection = HConnectionManager.createConnection(configuration)
        val student = connection.getTable("student1")

        try {
          // Insert each record: rowkey = id, info:name = name.
          iter.foreach(line => {
            val split = line.split(",")

            // Skip malformed lines instead of failing the whole partition
            // with an ArrayIndexOutOfBoundsException.
            if (split.length >= 2) {
              val id = split(0)
              val name = split(1)

              val put = new Put(id.getBytes())
              put.add("info".getBytes(), "name".getBytes(), name.getBytes())

              student.put(put)
            }
          })
        } finally {
          // Close the table handle and the connection even when a put fails,
          // so executors do not leak ZooKeeper/HBase resources.
          student.close()
          connection.close()
        }
      })
    } finally {
      sc.stop()
    }
  }
}
