package com.gome.han.bigdata.spark.core.wordcount.wcount

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @author Hanpeng
 * @date 2021/1/8 19:21
 * @description:
 */
object WordCount03 {
  def main(args: Array[String]): Unit = {
    // 01. Establish the connection between this application and the Spark
    //     framework (we want Spark to run our program).
    val sparkConf: SparkConf = new SparkConf()
    // Fixed: the app name previously said "WordCount01", inconsistent with this object.
    sparkConf.setAppName("WordCount03")
    // Set the master URL; local[*] uses all available cores on this machine.
    sparkConf.setMaster("local[*]")
    // If this line runs without errors the local environment is set up correctly.
    val sc = new SparkContext(sparkConf)

    // 02. Business logic.
    // Read the input as an RDD of lines.
    // NOTE(review): the input path "in" is hard-coded and relative to the
    // working directory — confirm this is intentional for the tutorial.
    val lines: RDD[String] = sc.textFile("in")

    // Flatten each line into individual words, so each RDD element is one word.
    val words: RDD[String] = lines.flatMap(_.split(" "))

    // Group identical words together for counting, e.g.
    // ("hello", [hello, hello, hello]), ("world", [world, world]).
    // NOTE(review): for real workloads prefer
    //   words.map((_, 1)).reduceByKey(_ + _)
    // which combines on the map side instead of shuffling every occurrence.
    val wordGroup: RDD[(String, Iterable[String])] = words.groupBy(word => word)

    // Convert each (word, occurrences) pair into (word, count).
    // A partial-function literal destructures the tuple directly; this replaces
    // the three equivalent map variants the original defined (two of which were
    // dead code — lazy RDDs that were never collected).
    val wordToCount: RDD[(String, Int)] = wordGroup.map {
      case (word, occurrences) => (word, occurrences.size)
    }

    // Trigger the job and print the results on the driver.
    val array: Array[(String, Int)] = wordToCount.collect()
    array.foreach(println)

    // 03. Release the connection to the Spark framework.
    sc.stop()
  }
}
