package com.zyh.day01

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WordCountSparkJob {

  /** Word-count Spark job: reads a text file from HDFS, counts
   *  whitespace-separated words, and writes the (word, count) pairs
   *  back to HDFS as text.
   *
   *  @param args optional overrides: args(0) = input path,
   *              args(1) = output path. When omitted, the original
   *              hard-coded HDFS locations are used, so existing
   *              invocations keep working unchanged.
   */
  def main(args: Array[String]): Unit = {
    // Run HDFS operations as root regardless of the local OS user.
    System.setProperty("HADOOP_USER_NAME", "root")

    // Allow paths to be supplied on the command line; fall back to the
    // previous hard-coded defaults for backward compatibility.
    val inputPath: String  = args.lift(0).getOrElse("hdfs://hadoop10:9000/sparktest/words.txt")
    val outputPath: String = args.lift(1).getOrElse("hdfs://hadoop10:9000/sparktest/out")

    // 1. Build the SparkContext. The master is intentionally NOT set here
    //    so it can be chosen by spark-submit; uncomment setMaster to run
    //    locally from an IDE.
    val conf: SparkConf = new SparkConf()
      .setAppName("wordcount")
      //.setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)

    // Ensure the context is always stopped, even if the job fails —
    // otherwise cluster resources are leaked on error.
    try {
      // 2. Read the input file as an RDD of lines.
      val fileRDD: RDD[String] = sc.textFile(inputPath)

      // 3. Split each line on runs of whitespace, pair every word with 1,
      //    then sum the counts per word.
      val resultRDD: RDD[(String, Int)] =
        fileRDD
          .flatMap(_.split("\\s+"))
          .map(_ -> 1)
          .reduceByKey(_ + _)

      // 4. Write the results to HDFS. As with a MapReduce job, the output
      //    directory must not already exist.
      resultRDD.saveAsTextFile(outputPath)
    } finally {
      // Release cluster resources.
      sc.stop()
    }
  }
}
