package com.gjy.learning.scala

import org.apache.spark.{SparkConf, SparkContext}


object WordCount {
  /**
   * Spark RDD word-count demo: reads a small text file from the local
   * filesystem and prints several statistics about it to stdout.
   *
   * Runs entirely in-process (`local` master), so `collect()` on the full
   * RDD is safe here — the input is a tiny demo file.
   */
  def main(args: Array[String]): Unit = {
    // "local" = single in-process worker thread; fine for this demo.
    val conf = new SparkConf().setAppName("WordCount").setMaster("local")
    val sc = new SparkContext(conf)
    try {
      // 1. Read src/main/resources/wordcount.txt and echo every line.
      val input = sc.textFile("src/main/resources/wordcount.txt")
      for (line <- input.collect()) {
        println("读取⽂件内容：" + line)
      }
      // 2. Count the number of lines in the file.
      val count = input.count()
      println("⽂件⾏数：" + count)
      // 3. Fetch the first line of the file.
      val firstLine = input.first()
      println("⽂件的第⼀⾏：" + firstLine)
      // 4. Filter the lines that contain the word "hello".
      val helloLines = input.filter(line => line.contains("hello")).collect()
      // FIX: concatenating an Array directly printed "[Ljava.lang.String;@<hash>";
      // render the contents with mkString instead.
      System.out.println("包含hello这个单词的⾏ " + helloLines.mkString("[", ", ", "]"))
      println("包含hello这个单词的⾏：" + helloLines.mkString("[", ", ", "]"))
      // Same filter, materialized as List[String] (List.toString is readable as-is).
      val helloLines1: List[String] =
        input.filter(line => line.contains("hello")).collect().toList
      println("包含hello这个单词的⾏：" + helloLines1)
      System.out.println("包含hello这个单词的⾏ " + helloLines1)
      // 5. How many lines contain the word "hello".
      val helloCount = input.filter(line => line.contains("hello")).count()
      println("包含hello这个单词的⾏数：" + helloCount)
      // 6. Per-word occurrence counts, two ways.
      // reduceByKey combines values map-side before the shuffle (preferred).
      println("每个单词出现的次数：reduceByKey")
      val wordCount1 = input.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
      println("每个单词出现的次数1：" + wordCount1.collect().mkString("\n"))
      // groupByKey ships every (word, 1) pair through the shuffle, then sums;
      // shown only for comparison with reduceByKey.
      println("每个单词出现的次数：groupByKey")
      val wordCount2 = input.flatMap(_.split(" ")).map((_, 1)).groupByKey().mapValues(_.sum)
      println("每个单词出现的次数2：" + wordCount2.collect().mkString("\n"))
    } finally {
      // FIX: stop the SparkContext even when an action above throws,
      // so the local Spark runtime is always released.
      sc.stop()
    }
  }
}
