package com.max.apptpl.wordcount

import org.apache.hadoop.mapred.TextOutputFormat
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object SWordcountBase {
  /** Spark implementation of word count.
    *
    * Usage: SWordcountBase &lt;inputPath&gt; &lt;outputPath&gt;
    *   - args(0): input path readable by Spark (local file or HDFS)
    *   - args(1): output directory (must not already exist; Spark creates it)
    */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of an opaque
    // ArrayIndexOutOfBoundsException when arguments are missing.
    require(args.length >= 2, "Usage: SWordcountBase <inputPath> <outputPath>")
    val inPath = args(0)
    val outPath = args(1)

    // Master is deliberately not set here so it can be supplied via
    // spark-submit; uncomment setMaster("local[2]") for local debugging.
    val sparkConf: SparkConf = new SparkConf().setAppName("ScalaWordCount") //.setMaster("local[2]")
    val sc: SparkContext = new SparkContext(sparkConf)
    try {
      val file: RDD[String] = sc.textFile(inPath)
      // Split each line on single spaces, pair every word with 1,
      // then sum the counts per word.
      // Equivalent one-liner:
      // val result: RDD[(String,Int)] = file.flatMap(_.split(" ")).map(x => (x, 1)).reduceByKey(_ + _)
      val words: RDD[String] = file.flatMap(_.split(" "))
      val wordAndCount: RDD[(String, Int)] = words.map(x => (x, 1))
      val result: RDD[(String, Int)] = wordAndCount.reduceByKey(_ + _)

      // Print the counts on the driver.
      // NOTE(review): collect() pulls the whole result to the driver —
      // fine for small outputs, will OOM on large vocabularies.
      result.collect().foreach(println)
      // Persist the counts as text files under outPath.
      result.saveAsTextFile(outPath)
      // Example of writing to HDFS with an explicit output format:
      // result.saveAsHadoopFile("hdfs://localhost:8020/user/max/log/wordcount/out2", classOf[String], classOf[Integer], classOf[TextOutputFormat[String, Integer]])
    } finally {
      // Always release the SparkContext, even when a job fails.
      sc.stop()
    }
  }

}
