package com.bocommlife.mi

import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions

object WordCountToHBase {

  /** Word-counts the text file given in `args(0)` and persists each
    * (word, count) pair into the HBase table "users" under column family
    * "info", qualifier "occurance". Finally prints all counts to stdout.
    *
    * Fixes over the original: the per-partition HBase `Connection`/`Table`
    * are now closed (they were leaked on every partition), the
    * `SparkContext` is stopped on exit, and deprecated procedure syntax
    * is replaced with an explicit `: Unit =`.
    */
  def main(args: Array[String]): Unit = {
    require(args.nonEmpty, "usage: WordCountToHBase <input-path>")

    val conf = new SparkConf().setAppName("Jimmy's first spark app").set("spark.yarn.jars", "hdfs://master:9000/spark_jars/*")
    val sc = new SparkContext(conf)

    try {
      val lines = sc.textFile(args(0))

      val words = lines
        .flatMap(s => s.split(" "))
        .map(w => (w, 1))
        .reduceByKey(_ + _)

      // HBase Connection/Table are not serializable, so they must be built
      // on the executor side — hence one connection per partition, not one
      // per record and not on the driver.
      words.foreachPartition { wordOccurancesPair =>
        val hbaseConf = HBaseConfiguration.create()
        hbaseConf.set("hbase.zookeeper.quorum", "129.1.9.38,129.1.9.39")
        hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
        hbaseConf.set("hbase.defaults.for.version.skip", "true")

        val hbaseConn = ConnectionFactory.createConnection(hbaseConf)
        try {
          val table = hbaseConn.getTable(TableName.valueOf("users"))
          try {
            wordOccurancesPair.foreach { case (word, count) =>
              val put = new Put(Bytes.toBytes(word))
              put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("occurance"), Bytes.toBytes(count))
              table.put(put)
            }
          } finally {
            table.close() // was never closed in the original
          }
        } finally {
          hbaseConn.close() // leaked a ZooKeeper connection per partition before
        }
      }

      words.collect().foreach(println)
    } finally {
      sc.stop() // release cluster resources even if the job fails
    }
  }
}