package com.one

import org.apache.spark.{SparkConf, SparkContext}

object InvertedIndexTest {

  // Builds an inverted index over three local text files: for each word, emits the
  // list of (documentId, occurrenceCount) pairs, formatted as: "word",{(doc,count),...}
  //
  // Submit command:
  //   spark-submit --master spark://127.0.0.1:7077 --class com.one.InvertedIndexTest \
  //     bigdata_rdd-1.0-SNAPSHOT.jar hdfs://127.0.0.1:9000/spark/output
  //
  // args(0): output path (e.g. an HDFS directory) for saveAsTextFile.
  def main(args: Array[String]): Unit = {
    // Fail fast with a clear message instead of an ArrayIndexOutOfBoundsException
    // at the very end of the job.
    require(args.length >= 1, "usage: InvertedIndexTest <output-path>")

    // App name fixed to match this job (was a copy-paste leftover "WordCount").
    val conf = new SparkConf().setAppName("InvertedIndexTest").setMaster("local")
    val sc = new SparkContext(conf)

    try {
      // (documentId, inputPath) pairs. Replaces the three copy-pasted per-file
      // blocks and the mutable `var unionrdd` accumulator.
      val inputs = Seq(
        "0" -> "F://0.txt",
        "1" -> "F://1.txt",
        "2" -> "F://2.txt"
      )

      // One RDD of (documentId, word) per file, unioned in a single call.
      val docWordRdds = inputs.map { case (docId, path) =>
        sc.textFile(path).flatMap(_.split(" ")).map((docId, _))
      }
      val unionRdd = sc.union(docWordRdds)

      // Count occurrences of each (documentId, word) pair.
      val wordCountRdd = unionRdd.map((_, 1)).reduceByKey(_ + _)

      // Reshape ((docId, word), count) => (word, "(docId,count)"),
      // then concatenate all postings for the same word.
      val formatRdd = wordCountRdd.map { case ((docId, word), count) =>
        (word, s"($docId,$count)")
      }
      val formatCountRdd = formatRdd.reduceByKey(_ + "," + _)

      // Final line format: "word",{(docId,count),(docId,count),...}
      val resultRdd = formatCountRdd.map { case (word, postings) =>
        s""""$word",{$postings}"""
      }

      // Persist the index to the caller-supplied output path.
      resultRdd.saveAsTextFile(args(0))
      //resultRdd.foreach(println)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
