package com.neo.base.C03_program

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

import org.slf4j.LoggerFactory
import com.typesafe.scalalogging.slf4j.Logger

/**
 * Demo of RDD creation and basic transformations/actions.
 *
 * Reads a Hadoop NameNode log from a hard-coded local path, filters it into
 * INFO and WARN line RDDs, unions the two, then runs actions (count, take)
 * and logs a 10-line sample.
 */
class C03_02_createRdd {
  val logger = Logger(LoggerFactory.getLogger("WordCount"))

  /**
   * Runs the full demo pipeline: build a local SparkContext, filter the log
   * file into INFO/WARN RDDs, union them, and print count plus 10 examples.
   * The SparkContext is always stopped on exit so repeated runs do not leak
   * the local cluster.
   */
  def workTest(): Unit ={
    logger.info("创建一个Scala版本的Spark Context")
    val conf = new SparkConf().setMaster("local[8]").setAppName("Test")
    val sc = new SparkContext(conf)

    try {
      logger.info("读取我们的输入数据")
      //val lines = sc.parallelize(List("pandas","i like pandas"))
      val lines = sc.textFile("/home/hadoop/app/hadoop-2.6.5/logs/hadoop-hadoop-namenode-c64-dt.log")

      logger.info("转化操作")
      val infoRDD = lines.filter(line => line.contains("INFO"))

      logger.info("转化操作")
      val warnRDD  = lines.filter(line => line.contains("WARN"))

      logger.info("合并RDD(union)")
      // Fix: the original unioned warnRDD with itself, leaving infoRDD unused.
      // The intent (per the log messages) is to combine INFO and WARN lines.
      val logRdd = infoRDD.union(warnRDD)

      logger.info("行动操作")
      logger.info("Input had " + logRdd.count() + " concerning lines")
      logger.info("Here are 10 examples:")
      logRdd.take(10).foreach(println)
    } finally {
      // Release Spark resources even if an action above throws
      // (e.g. the input path does not exist on this machine).
      sc.stop()
    }
  }

}


/**
 * Application entry point for the RDD-creation demo: instantiates the
 * companion class and executes its workflow.
 */
object C03_02_createRdd{
  val logger = Logger(LoggerFactory.getLogger("WordCount"))

  def main(args: Array[String]): Unit = {
    val demo = new C03_02_createRdd
    demo.workTest()
  }
}