package clean

import java.util.logging.{Level, Logger}

import org.apache.spark.{SparkConf, SparkContext}

/**
 * 2020-08-06
 *
 * @author yangyang
 * Verified: works with both local files and HDFS files.
 * Example submit command:
 * /usr/local/spark-2.1.1-bin-hadoop2.7/bin/spark-submit --master local[2] --class clean.CleanData  /home/test/BigDataProject-1.0-SNAPSHOT.jar  hdfs://127.0.0.1:9000/test.txt  /home/test/test.txt
 */
/**
 * Spark job: reads a text file (local or HDFS), performs a word count,
 * and writes the (word, count) pairs to the output path.
 *
 * Usage: CleanData &lt;input&gt; &lt;output&gt;
 */
object CleanData {
  def main(args: Array[String]): Unit = {
    // Uncomment to silence verbose Spark logging during execution.
//    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
//    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    // Argument check: an input path and an output path are both required.
    if (args.length < 2) {
      System.err.println("Usage: CleanData <input> <output> ")
      System.exit(1)
    }
    // Read the paths once and use them consistently below.
    val input = args(0)
    val output = args(1)

    // NOTE(review): setMaster("local[2]") overrides any --master passed to
    // spark-submit; kept for backward compatibility with local runs.
    val conf = new SparkConf().setAppName("CleanData").setMaster("local[2]")
    val sc = new SparkContext(conf)

    try {
      // Read the raw input data (local path or hdfs:// URI).
      val fileRDD = sc.textFile(input)

      // Clean/transform: split lines on spaces and count occurrences of each word.
//    val cleanDataRDD = fileRDD.map(_.split(",")).filter(_(2).startsWith("http")).filter(_.length == 11)
      val cleanDataRDD = fileRDD.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)

      // Save the CLEANED result (the original code saved fileRDD by mistake,
      // discarding the word count entirely).
      cleanDataRDD.saveAsTextFile(output)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }

    println("Finished")

  }
}
