package com.etc

import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapreduce.InputSplit
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, TextInputFormat}
import org.apache.spark.rdd.NewHadoopRDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Builds a simple inverted index over text files: for every word, counts how
 * many lines contain it per source file, emitting "word fileName --> count".
 *
 * Uses `NewHadoopRDD.mapPartitionsWithInputSplit` so each partition can
 * recover the name of the file it was read from via its `FileSplit`.
 */
object Inverted {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Inverted").setMaster("local")
    val sc = new SparkContext(conf)
    try {
      // Input directory: first CLI argument if given, otherwise the original default.
      val input = if (args.nonEmpty) args(0) else "E:\\input\\Inverted"

      val fileRDD = sc.newAPIHadoopFile[LongWritable, Text, TextInputFormat](input)

      // Downcast to NewHadoopRDD to gain access to the per-partition InputSplit.
      val hadoopRDD = fileRDD.asInstanceOf[NewHadoopRDD[LongWritable, Text]]

      val wordFileCounts = hadoopRDD.mapPartitionsWithInputSplit(
        (inputSplit: InputSplit, iterator: Iterator[(LongWritable, Text)]) => {
          // Every record in this partition comes from the same file split.
          val fileName = inputSplit.asInstanceOf[FileSplit].getPath.getName
          iterator
            // Hadoop reuses Text instances; materialize to String immediately.
            .map { case (_, line) => line.toString }
            .flatMap(_.split(" "))
            .map(word => (s"$word $fileName", 1))
        })

      // NOTE: foreach runs on executors; println output is only visible locally
      // because the master is "local".
      wordFileCounts.reduceByKey(_ + _).foreach { case (key, count) =>
        println(s"$key-->$count")
      }
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}