package org.hadoop.spark
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}
object WordCount2 {

  /**
   * Spark word-count job over an HDFS input path.
   *
   * Usage: WordCount2 <in> <out>
   *   - deletes the output path first if it already exists (saveAsTextFile
   *     fails on an existing directory),
   *   - aborts with a message if the input path does not exist,
   *   - otherwise counts whitespace-separated tokens, sorts by word, and
   *     writes "word<TAB>count" lines to the output path.
   */
  def main(args: Array[String]): Unit = {
    if (args.length < 2) {
      println("usage :<in> <out>")
      return
    }
    val in: String = args(0)
    val out: String = args(1)

    val conf: SparkConf = new SparkConf()
    // NOTE(review): plain "fs.defaultFS" on SparkConf is NOT forwarded to the
    // Hadoop configuration (that requires the "spark.hadoop." prefix); kept
    // as-is for behavior compatibility — confirm whether it was ever needed.
    conf.set("fs.defaultFS", "hdfs://server201:8020")
    conf.setAppName("WordCount")

    // Separate Hadoop configuration for the pre-flight filesystem checks.
    val hadoopConfig: Configuration = new Configuration()
    hadoopConfig.set("fs.defaultFS", "hdfs://server201:8020")
    val fs: FileSystem = FileSystem.get(hadoopConfig)

    // Remove a pre-existing output directory so saveAsTextFile can succeed.
    val pathOut: Path = new Path(out)
    if (fs.exists(pathOut)) {
      fs.delete(pathOut, true)
    }
    if (!fs.exists(new Path(in))) {
      println("文件或目录不存在：" + in)
      return
    }

    // Create the context only after the checks pass, so an early return
    // cannot leak it; stop it in `finally` even if the job throws.
    val sc: SparkContext = new SparkContext(conf)
    try {
      sc.textFile(in, minPartitions = 2)
        .flatMap(_.split("\\s+"))  // tokenize on runs of whitespace
        .map((_, 1))
        .reduceByKey(_ + _)
        .sortByKey()               // sort alphabetically by word
        .map { case (word, count) => s"$word\t$count" }
        .saveAsTextFile(out)
    } finally {
      sc.stop()
    }
  }
}