package com.bocommlife.mi

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions

object WordCount {

  /*
   * Example submission:
   *   spark-submit --class com.bocommlife.mi.WordCount --master yarn-client \
   *     ~/spark-wordcount-scala-0.0.1-SNAPSHOT.jar \
   *     hdfs://129.1.9.47:9000/Hadoop/Input/nohup.out \
   *     hdfs://129.1.9.47:9000/Hadoop/Input/nohup2.out
   *
   * Sample timings / counts from previous runs:
   *   4.xxxx s
   *   (UNKNOWN,209134) (ERROR,1307) (INFO,56307) (WARN,82) (DEBUG,80796)
   *   first: 10.159335 s  second: 10.972539 s
   *   (UNKNOWN,1463938) (DEBUG,565572) (INFO,394149) (WARN,574) (ERROR,9149)
   */

  /**
   * Reads two text files from HDFS, unions their lines, and writes the
   * combined content to a single output file on HDFS.
   *
   * @param args optional positional arguments:
   *             args(0) = first input path,
   *             args(1) = second input path,
   *             args(2) = output path.
   *             Any missing argument falls back to the hard-coded default
   *             below, preserving the previous zero-argument behavior.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("Jimmy's first spark app")
      .set("spark.yarn.jars", "hdfs://master:9000/spark_jars/*")
    val sc = new SparkContext(conf)

    try {
      // FIX: args were previously ignored even though the usage comment above
      // shows paths being passed on the command line. CLI arguments now
      // override the hard-coded defaults when supplied.
      val input1 =
        if (args.length > 0) args(0)
        else "hdfs://129.1.9.47:9000/Hadoop/Input/wordcount.txt"
      val input2 =
        if (args.length > 1) args(1)
        else "hdfs://129.1.9.47:9000/Hadoop/Input/wordcount2.txt"
      val output =
        if (args.length > 2) args(2)
        else "hdfs://master:9000/Hadoop/Input/result.out"

      val lines  = sc.textFile(input1)
      val lines2 = sc.textFile(input2)

      // Union the two inputs and persist them as one combined text file.
      // coalesce(1) collapses to a single partition so HDFS receives exactly
      // one part file. The original `flatMap(line => Array(line))` was an
      // identity no-op (wrap each line in a 1-element array, then flatten)
      // and has been removed.
      // NOTE(review): saveAsTextFile fails if `output` already exists —
      // delete the target path before re-running.
      lines.union(lines2)
        .coalesce(1)
        .saveAsTextFile(output)
    } finally {
      // FIX: the SparkContext was never stopped, leaking the YARN
      // application on both success and failure paths.
      sc.stop()
    }
  }
}