import org.apache.spark.{SparkConf, SparkContext}

/**
  * 统计字符出现次数
  */
/**
  * Counts character occurrences in a text file and writes the result back out.
  *
  * Reads the input path, runs the project's `MapUtil.map` transformation over the
  * lines, and saves the result as a single-partition text file at the output path.
  */
object SparkETL {
  def main(args: Array[String]): Unit = {

    // Fallback paths for running locally from the IDE with no CLI arguments.
    val defaultArgs = Array("C:\\jie\\temp\\log\\input", "C:\\jie\\temp\\log\\output")

    // Local Hadoop setup: winutils location and the HDFS user to act as.
    System.setProperty("hadoop.home.dir", "D:\\hadoop\\hadoop-2.7.3")
    System.setProperty("user.name", "hdfs")
    System.setProperty("HADOOP_USER_NAME", "hdfs")

    // Honor CLI arguments when supplied; otherwise fall back to the local defaults.
    // NOTE: the original guard checked the hard-coded array (always length 2), so it
    // could never fire; the job actually needs both an input and an output path.
    val paths = if (args.nonEmpty) args else defaultArgs
    if (paths.length < 2) {
      System.err.println("Usage: <input> <output>")
      System.exit(1)
    }

    // Spark initialization: single-threaded local master for development runs.
    val conf = new SparkConf().setMaster("local").setAppName("test")
    val sc = new SparkContext(conf)
    try {
      val line = sc.textFile(paths(0))
      // Delegate the per-line transformation to the project's MapUtil helper.
      val m = new MapUtil()
      val rdd = m.map(line)
      // Coalesce to one partition so HDFS receives a single output file.
      rdd.repartition(1).saveAsTextFile(paths(1))
    } finally {
      // Always release the SparkContext, even if the job fails mid-way.
      sc.stop()
    }
  }
}
