import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by Administrator on 2016/3/18.
  */
/**
  * A collection of small Spark RDD examples run against local files:
  * line/word printing, word counting, sorting by count, groupByKey,
  * join, reduce and lookup.
  *
  * NOTE(review): the input paths (D:/1.txt, D:/LICENSE, D:/result) are
  * hard-coded Windows-local paths — the examples only run where those
  * files exist.
  */
object WordCount {

  // Local-mode configuration; a single SparkContext is shared by all
  // example methods and stopped in main().
  val conf = new SparkConf()
    .setAppName("WordCount")
    .setMaster("local")

  val sc = new SparkContext(conf)

  /** Prints each line of the input file to stdout. */
  def printByLine(): Unit = {
    val lines = sc.textFile("file:///D:/LICENSE")
    lines.collect().foreach(println)
  }

  /**
    * Prints every word of the input file, one per line, by splitting each
    * line on whitespace with `map` (yielding an RDD of arrays, flattened
    * only at print time). Compare with [[printByWord2]].
    */
  def printByWord1(): Unit = {
    val lines = sc.textFile("file:///D:/1.txt")
    lines.map(line => line.split("\\s"))
      .collect().foreach(_.foreach(println))
  }

  /**
    * Same output as [[printByWord1]], but flattens inside the RDD with
    * `flatMap` — the more idiomatic form.
    */
  def printByWord2(): Unit = {
    val lines = sc.textFile("file:///D:/1.txt")
    lines.flatMap(line => line.split("\\s")).collect().foreach(println)
  }

  /**
    * Counts words and writes the (word, count) pairs, sorted by count in
    * ascending order, to file:///D:/result.
    */
  def sortWordCount(): Unit = {
    val lines = sc.textFile("file:///D:/1.txt")
    // sortBy(_._2) replaces the original swap → sortByKey → swap-back
    // dance while producing the same (word, count) ordering.
    lines.flatMap(_.split("\\s")).map((_, 1)).reduceByKey(_ + _)
      .sortBy(_._2, ascending = true)
      .saveAsTextFile("file:///D:/result")
  }

  /**
    * Demonstrates `groupByKey`: prints each word paired with the iterable
    * of its per-occurrence 1s (not yet summed — contrast with
    * [[wordCount]], which uses the more efficient `reduceByKey`).
    */
  def groupByKey(): Unit = {
    val lines = sc.textFile("file:///D:/1.txt")
    lines.flatMap(_.split("\\s")).map((_, 1)).groupByKey()
      .collect().foreach(println)
  }

  /**
    * Demonstrates an inner join on two pair RDDs: prints every
    * (key, (leftValue, rightValue)) combination for matching keys.
    */
  def join(): Unit = {
    val rdd1 = sc.parallelize(List(('a', 1), ('a', 2), ('b', 1), ('b', 2)))
    val rdd2 = sc.parallelize(List(('a', 3), ('a', 4), ('b', 3), ('b', 4)))

    rdd1.join(rdd2).collect().foreach(println)
  }

  /** Demonstrates `reduce`: prints the sum of 1..5 (15). */
  def reduce(): Unit = {
    val rdd = sc.parallelize(List(1, 2, 3, 4, 5))
    println(rdd.reduce(_ + _))
  }

  /** Demonstrates `lookup`: prints every value stored under key 'a'. */
  def lookup(): Unit = {
    val rdd = sc.parallelize(List(('a', 1), ('a', 2), ('b', 1), ('b', 2)))
    rdd.lookup('a').foreach(println)
  }

  /** Entry point: runs the word-count example, then shuts Spark down. */
  def main(args: Array[String]): Unit = {
    wordCount()
    sc.stop()
  }

  /** Counts words in the input file and prints each (word, count) pair. */
  def wordCount(): Unit = {
    val lines = sc.textFile("file:///D:/1.txt")
    lines.flatMap(_.split("\\s")).map((_, 1)).reduceByKey(_ + _)
      .collect().foreach(println)
  }

}
