package cn.kaiux.bigdata.homework.week05_06

import org.apache.hadoop.fs.Path
import org.apache.spark.sql.SparkSession

/**
 * @author kaiux@foxmail.com
 * @date 2022/6/11 14:12
 */
object InvertedIndex {

  /**
   * Builds an inverted index over all text files found (recursively, depth 3)
   * under the configured source paths and writes it to the target path.
   *
   * Output line format: `"word",{(file1,count1),(file2,count2),...}`
   *
   * @param args command-line arguments, parsed by [[InvertedIndexOptionsParsing]]
   */
  def main(args: Array[String]): Unit = {

    val options = InvertedIndexOptionsParsing.parse(args)

    val sparkSession = SparkSession
      .builder()
      .appName("InvertedIndex")
      .getOrCreate()

    // Ensure the Spark application is always stopped, even on failure,
    // so cluster resources are released (the original leaked the session).
    try {
      val sparkContext = sparkSession.sparkContext
      val hadoopConf = sparkContext.hadoopConfiguration

      val (sourcePaths, targetPath) = options.getPaths

      val qualifiedSourcePaths =
        sourcePaths.map(FileUtils.pathToQualifiedPath(hadoopConf, _))
      val qualifiedTargetPath =
        FileUtils.pathToQualifiedPath(hadoopConf, targetPath)

      // Enumerate input files. Listing is a driver-side filesystem operation;
      // the original parallelize(...).collect() round trip through the cluster
      // was pure overhead. `toSet` also dedupes files reachable via more than
      // one (possibly overlapping) source path.
      val allFiles: Set[String] = qualifiedSourcePaths.flatMap { sourcePath =>
        val sourceFS = sourcePath.getFileSystem(hadoopConf)
        FileUtils
          .listFiles(sourceFS, sourcePath, 3)
          .filter(_._1.isFile)
          .map(_._1.getPath.toUri.toString)
      }.toSet

      // One (fileName, word) RDD per input file. A single SparkContext.union
      // replaces the original var + incremental union loop, which built an
      // O(n)-deep lineage chain (slow to evaluate, stack-overflow prone for
      // many files).
      val fileWordRDDs = allFiles.toSeq.map { file =>
        val fileName = new Path(file).getName
        sparkContext
          .textFile(file)
          .flatMap(_.split(" ").map((fileName, _)))
      }
      val unionRDD = sparkContext.union(fileWordRDDs)

      val result = unionRDD
        .map(pair => (pair, 1))
        .reduceByKey(_ + _) // ((fileName, word), count) — occurrences per file
        .map { case ((fileName, word), count) => (word, s"($fileName,$count)") }
        .reduceByKey(_ + "," + _) // word -> "(f1,c1),(f2,c2),..."
        .map { case (word, postings) => s""""$word",{$postings}""" }
        .cache() // materialized twice below; cache avoids recomputing the shuffles

      result
        .collect()
        .foreach(println)

      result.saveAsTextFile(qualifiedTargetPath.toUri.toString)
    } finally {
      sparkSession.stop()
    }
  }
}