package com.huang.week9.spark

import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, TextInputFormat}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.NewHadoopRDD

/**
 * Spark job that builds an inverted index: for every word found in the
 * input files it emits the word together with a per-file occurrence count,
 * writing the result as text to the output path.
 */
object App {

  /**
   * Entry point. Builds an inverted index over the files under `inputPath`
   * and writes one line per word to `outputPath`, formatted as
   * `word:HashMap(file1 -> count1, file2 -> count2, ...)`.
   *
   * @param args args(0) = input path, args(1) = output path, args(2) = Spark master URL
   */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of an opaque IndexOutOfBoundsException.
    require(args.length >= 3, "usage: App <inputPath> <outputPath> <master>")

    val appName = "InvertedIndex-yyf"
    val inputPath = args(0)
    val outputPath = args(1)
    val master = args(2)

    val sparkConfig = new SparkConf().setAppName(appName).setMaster(master)
    val sc = new SparkContext(sparkConfig)
    try {
      // One record per input line: (byte offset, line text).
      val records = sc.newAPIHadoopFile(
        inputPath, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], sc.hadoopConfiguration)

      // Tag every line with the simple name of the file it came from.
      // NewHadoopRDD exposes the InputSplit, which for text files is a FileSplit.
      val linesWithFile = records.asInstanceOf[NewHadoopRDD[LongWritable, Text]]
        .mapPartitionsWithInputSplit((inputSplit, iterator) => {
          val fileName = inputSplit.asInstanceOf[FileSplit].getPath.getName
          iterator.map { case (_, line) => (fileName, line) }
        })

      // Tokenize into (word, fileName) pairs. TextInputFormat already strips the
      // line terminator, so splitting on runs of whitespace also absorbs any stray
      // '\r' from CRLF files; filter out the empty tokens that consecutive
      // delimiters (or leading whitespace) would otherwise produce.
      val wordFilePairs = linesWithFile.flatMap {
        case (fileName, line) =>
          line.toString.split("\\s+").iterator
            .filter(_.nonEmpty)
            .map(word => (word, fileName))
      }

      // word -> all file occurrences (one entry per occurrence).
      val invertedIndex = wordFilePairs.groupByKey()

      // Collapse the occurrence list into a per-file count. A mutable.HashMap is
      // kept deliberately: its toString defines the on-disk output format.
      val group = invertedIndex.map {
        case (word, fileNames) =>
          val counts = scala.collection.mutable.HashMap[String, Int]()
          for (fileName <- fileNames) {
            counts(fileName) = counts.getOrElse(fileName, 0) + 1
          }
          (word, counts)
      }.sortByKey().map { case (word, counts) => s"$word:$counts" }

      // repartition(1) (shuffle-based) keeps the sort stage parallel and only
      // collapses to a single output file at the very end.
      group.repartition(1).saveAsTextFile(outputPath)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
