package com.weic.spark.scala.p1

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author: BigData-weic
 * @ClassName: RemoteScalaWordCountApp
 * @Date: 2020/12/6 20:35
 * @Description: Spark word-count job — reads text from an input path, counts word
 *               occurrences, and writes (word, count) pairs to an output path.
 * @Version: 1.0
 */
object RemoteScalaWordCountApp {

	/**
	 * Entry point: reads text from `args(0)`, counts occurrences of each
	 * whitespace-separated word, and writes the (word, count) pairs to `args(1)`.
	 *
	 * @param args expects exactly two arguments: &lt;input&gt; &lt;output&gt;
	 */
	def main(args: Array[String]): Unit = {
		// Bug fix: usage requires TWO paths (<input> <output>), but the original
		// guard checked `args.length != 1`, rejecting correct invocations and
		// letting a single-arg call fall through to a MatchError below.
		if (args == null || args.length != 2) {
			println(
				"""
				  |Usage: <input> <output>
				  |""".stripMargin
			)
			System.exit(-1)
		}
		// Destructure the two required paths; `val` — they are never reassigned.
		val Array(input, output) = args

		val conf = new SparkConf()
		conf.setAppName(s"${RemoteScalaWordCountApp.getClass.getSimpleName}")
		// NOTE(review): master is hard-coded to local[*] despite the "Remote" name;
		// for cluster submission this should come from spark-submit instead.
		conf.setMaster("local[*]")

		val sc = new SparkContext(conf)

		val lines: RDD[String] = sc.textFile(input)
		// Split each line on runs of whitespace to get individual words.
		val words: RDD[String] = lines.flatMap(_.split("\\s+"))
		val wordMap: RDD[(String, Int)] = words.map(word => (word, 1))
		// Lazy transformation — nothing executes until an action is triggered.
		val ret: RDD[(String, Int)] = wordMap.reduceByKey(myReduceByKey)
		// saveAsTextFile is the action that actually runs the job.
		ret.saveAsTextFile(output)
		// Release resources.
		sc.stop()
	}

	/** Associative, commutative combiner for reduceByKey: sums two partial counts. */
	def myReduceByKey(x: Int, y: Int): Int = x + y

}
