package cn.wangjie.spark.format

import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo: reading a text file through the new Hadoop MapReduce API
 * (`TextInputFormat`) instead of `SparkContext.textFile`, which yields
 * an RDD of (byte offset, line) pairs.
 */
object SparkInputFormatTest {

  def main(args: Array[String]): Unit = {
    // Application settings: name derived from the object (drop the
    // trailing '$' Scala adds to object class names) and a local
    // 2-thread master for standalone runs.
    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
      .setMaster("local[2]")
    val sc: SparkContext = SparkContext.getOrCreate(conf)

    /*
        def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]](
            path: String,
            fClass: Class[F],
            kClass: Class[K],
            vClass: Class[V],
            conf: Configuration = hadoopConfiguration
        ): RDD[(K, V)]
     */
    // Key = byte offset of the line in the file, Value = the line itself.
    val linesRDD: RDD[(LongWritable, Text)] = sc.newAPIHadoopFile(
      "datas/wordcount/wordcount.data", //
      classOf[TextInputFormat], //
      classOf[LongWritable], //
      classOf[Text]
    )

    // Discard the offset key and print each line's text content.
    linesRDD
      .map { case (_, line) => line.toString }
      .foreach(println)

    // Release Spark resources before the application exits.
    sc.stop()
  }

}
