package com.atbeijing.bigdata.spark.core.wc

import org.apache.spark.{SparkConf, SparkContext}

object Spark01_WordCount {

    /**
     * Classic Spark RDD word count.
     *
     * Reads "data/word.txt", splits each line on single spaces, counts
     * occurrences of each word, and prints the (word, count) pairs that
     * are collected back to the driver.
     *
     * @param args command-line arguments (unused)
     */
    def main(args: Array[String]): Unit = {

        // TODO 1. Build the connection (environment) to the Spark engine.
        // client => SparkContext => Spark server, analogous to
        // client => Connection(url, user, password) => MySQL server.
        val conf = new SparkConf().setMaster("local").setAppName("WordCount")
        val sc = new SparkContext(conf)

        // Ensure the context is always released, even if the job fails.
        try {
            // TODO 2. Statistical analysis of the data.
            // 2.1 Read the file data source to obtain the raw lines.
            //     line, line, line
            val lines = sc.textFile("data/word.txt")

            // 2.2 Split the raw data into individual words.
            //     line => word1, word2, word3
            val words = lines.flatMap(line => line.split(" "))

            // 2.3 / 2.4 Pair each word with 1 and sum per key.
            //     reduceByKey combines partial counts on the map side,
            //     so unlike groupBy it never shuffles or materializes the
            //     full list of occurrences per word — same result, far
            //     less data movement.
            //     word => (word, 1) => (word, n)
            val wordcount = words
                .map(word => (word, 1))
                .reduceByKey(_ + _)

            // 2.5 Print the aggregated result on the driver console.
            //     collect() brings the (small) result set to the driver;
            //     a bare rdd.foreach(println) would print on executors.
            wordcount.collect().foreach(println)
        } finally {
            // TODO 3. Close the connection.
            sc.stop()
        }

    }
}
