package com.shujia.flink.sink

import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.sink.{RichSinkFunction, SinkFunction}
import org.apache.flink.streaming.api.scala._
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory, Put, Table}
import redis.clients.jedis.Jedis

/**
 * Flink streaming job: reads comma-separated words from a socket
 * ("master":8888), maintains a running count per word, and upserts each
 * updated (word, count) pair into the HBase table `word_count`
 * (column family `info`, qualifier `count`), keyed by the word.
 *
 * Prerequisite: the table must already exist in HBase:
 *   create 'word_count','info'
 */
object Demo6HbaseSInk {
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    val linesDS: DataStream[String] = env.socketTextStream("master", 8888)

    // Running word count: split each line on commas, key by word, sum occurrences.
    // Each incoming word emits an updated (word, totalCount) downstream.
    val countDS: DataStream[(String, Int)] = linesDS
      .flatMap(_.split(","))
      .map((_, 1))
      .keyBy(_._1)
      .sum(1)

    // Persist each updated count to HBase. RichSinkFunction is used so the
    // (non-serializable) HBase connection can be created per task in open()
    // rather than captured in the closure.
    countDS.addSink(new RichSinkFunction[(String, Int)] {

      var con: Connection = _
      var table: Table = _

      override def open(parameters: Configuration): Unit = {
        // The HBase client locates the cluster via ZooKeeper, so the only
        // required setting is the ZooKeeper quorum.
        val conf = new org.apache.hadoop.conf.Configuration()
        conf.set("hbase.zookeeper.quorum", "node1,node2,master")

        // ConnectionFactory.createConnection is heavyweight; create it once
        // per task in open(), not per record.
        con = ConnectionFactory.createConnection(conf)

        // Table handle for the pre-created table: create 'word_count','info'
        table = con.getTable(TableName.valueOf("word_count"))
      }

      override def close(): Unit = {
        // Close the Table before the Connection (HBase Table instances must be
        // closed explicitly), and guard against a partially-failed open().
        if (table != null) {
          table.close()
        }
        if (con != null) {
          con.close()
        }
      }

      override def invoke(value: (String, Int), context: SinkFunction.Context): Unit = {
        val word: String = value._1
        val count: Int = value._2
        // Row key = the word itself, so repeated updates for the same word
        // overwrite the previous count (idempotent upsert).
        val put = new Put(word.getBytes())
        put.addColumn("info".getBytes(), "count".getBytes(), count.toString.getBytes())

        table.put(put)
      }
    })

    env.execute()

  }

}
