import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

import java.io.File
import scala.collection.mutable.ListBuffer

object Main {

  /** A single occurrence of `word` inside the file named `file`. */
  case class WordAndFile(word: String, file: String)

  /**
   * Recursively lists the names of all regular files under `hdfsPath`.
   *
   * @param hdfsPath root directory/URI to scan
   * @param sc       active SparkContext, used only for its Hadoop configuration
   * @return the simple names (not full paths) of every file found
   */
  def listFiles(hdfsPath: String, sc: SparkContext): List[String] = {
    // NOTE: FileSystem.get returns a JVM-wide *cached* instance. Closing it
    // (as the previous version did) invalidates the handle for every other
    // caller — including Spark itself — so we deliberately do NOT close it.
    val fs = FileSystem.get(sc.hadoopConfiguration)
    val itr = fs.listFiles(new Path(hdfsPath), true) // true = recursive
    val names = ListBuffer.empty[String]
    while (itr.hasNext) {
      val status = itr.next()
      if (status.isFile) names += status.getPath.getName
    }
    names.toList
  }

  /**
   * Extracts the last path component of an HDFS path.
   *
   * HDFS paths always use '/' as the separator, so the platform-dependent
   * `java.io.File.separator` (which is '\' on Windows and would make this
   * return the whole path there) must not be used.
   *
   * @return the substring after the last '/', or `path` itself if none
   */
  def fileName(path: String): String =
    path.substring(path.lastIndexOf('/') + 1)

  /**
   * Builds an inverted index over the text files matched by args(0) and
   * prints one line per word: `word:{(file,count),(file,count),...}`.
   */
  def main(args: Array[String]): Unit = {
    if (args.length < 1) {
      System.err.println("Usage: InvertedIndex <hdfs://host:port/paths>")
      System.exit(1)
    }

    val hdfsPath = args(0)
    val spark = SparkSession
      .builder()
      .appName("InvertedIndex demo")
      .getOrCreate()
    val sc = spark.sparkContext

    sc.wholeTextFiles(hdfsPath)
      .flatMap { case (path, content) =>
        // Split on any whitespace run. The old pattern "[ |\n]" treated '|'
        // as a literal delimiter (inside a character class '|' is not
        // alternation) and produced empty tokens between adjacent blanks,
        // which were then counted as words.
        content.split("\\s+").iterator
          .filter(_.nonEmpty)
          .map(w => WordAndFile(w, fileName(path)))
      }
      // Count occurrences per (word, file) pair with a map-side-combining
      // reduceByKey instead of groupBy, so a frequent word's occurrences are
      // never all materialised on a single executor.
      .map(wf => ((wf.word, wf.file), 1))
      .reduceByKey(_ + _)
      .map { case ((word, file), n) => (word, List((file, n))) }
      .reduceByKey(_ ++ _)
      .sortByKey()
      .map { case (word, postings) =>
        // mkString is total (safe even for an empty list), unlike reduce.
        s"$word:{${postings.map { case (f, n) => s"($f,$n)" }.mkString(",")}}"
      }
      .foreach(println)

    sc.stop()
  }

}