package com.bocommlife.mi

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.storage.StorageLevel
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.TableName
import org.apache.spark.util.LongAccumulator

/**
 * Spark Streaming demo: reads whitespace-separated words from a socket
 * (129.1.9.46:9999), classifies each word as "hello" / "hi" / "misc",
 * and counts occurrences per 1-second batch.
 *
 * Two driver-registered [[LongAccumulator]]s track running totals of
 * "hello" and "hi" across batches.
 *
 * WARNING: the accumulators are updated inside a `filter` transformation.
 * Spark only guarantees exactly-once accumulator updates for updates made
 * inside *actions*; if a task is retried, updates made in a transformation
 * may be applied more than once, so the totals can over-count under failure.
 */
object StreamingCount {

  // Running totals, registered on the driver before the streaming graph is
  // built; executors call `.add`, the driver reads `.value` per batch.
  private var helloAccumulator: LongAccumulator = _
  private var hiAccumulator: LongAccumulator = _

  def main(args: Array[String]): Unit = {

    // spark.yarn.jars points executors at a pre-staged jar directory on HDFS
    // so the Spark jars are not re-uploaded on every submission.
    val conf = new SparkConf()
      .setAppName("streaming couting ap")
      .set("spark.yarn.jars", "hdfs://master:9000/spark_jars/*")
    val ssc = new StreamingContext(conf, Seconds(1))

    helloAccumulator = ssc.sparkContext.longAccumulator("hello accum")
    hiAccumulator = ssc.sparkContext.longAccumulator("hi accum")

    // MEMORY_AND_DISK so receiver blocks spill to disk instead of being
    // dropped when executor memory is tight.
    val lines = ssc.socketTextStream("129.1.9.46", 9999, StorageLevel.MEMORY_AND_DISK)

    val words = lines.flatMap(_.split(" "))

    // Keep the two words of interest as their own keys; bucket the rest.
    val wordPairs = words.map {
      case w @ ("hello" | "hi") => (w, 1)
      case _                    => ("misc", 1)
    }

    val wordCounts = wordPairs.reduceByKey(_ + _)

    // Runs executor-side. NOTE(review): reading `.value` here only observes
    // the task-local partial sum, not the global total — the authoritative
    // totals are printed driver-side in the foreachRDD below. The prints are
    // kept for per-task debugging in the executor logs.
    val r = wordCounts.filter { pair =>
      println("=================================================================")
      println("pair key is " + pair._1)
      pair._1 match {
        case "hello" =>
          foo(pair._2, helloAccumulator)
          println("hello accumulator count is ... " + helloAccumulator.value)
          true
        case "hi" =>
          foo(pair._2, hiAccumulator)
          println("hi accumulator count is ... " + hiAccumulator.value)
          true
        case _ =>
          false
      }
    }

    r.print()

    // Driver-side per-batch report. Output operations run sequentially in
    // registration order, so by the time this runs, r.print() above has
    // already executed the filter for this batch and the accumulators are
    // up to date. (Printing the values once before ssc.start(), as a naive
    // version would, always shows 0 — nothing has run yet at that point.)
    wordCounts.foreachRDD { _ =>
      println("=================================================================")
      println("hello accumulator count is ... " + helloAccumulator.value)
      println("hi accumulator count is ... " + hiAccumulator.value)
      println("=================================================================")
    }

    ssc.start()
    ssc.awaitTermination()
  }

  /** Adds `count` to the given accumulator; safe to call executor-side. */
  def foo(count: Long, accum: LongAccumulator): Unit =
    accum.add(count)
}