package spark.SecondarySort

import org.apache.hadoop.conf.Configuration
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.immutable.HashMap

/**
  * Secondary sort via `sortBy`: globally sorts "name age" records,
  * first by name, then numerically by age (tuple Ordering on (String, Int)).
  */
object SecondarySort {
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().setAppName("SecondarySort").setMaster("local[*]")
		val sc = new SparkContext(sparkConf)
		
		val textFile: RDD[String] = sc.textFile("D:\\user\\zhangjian\\input\\spark\\sort.txt")
		// Drop malformed records: keep only lines with exactly two space-separated fields.
		val filterRDD: RDD[String] = textFile.filter(_.split(" ").length == 2)
		// Sort key is the (name, age) tuple; the implicit tuple Ordering yields the
		// secondary sort (name first, then age) without an intermediate Person object.
		val sortRDD: RDD[String] = filterRDD.sortBy { lines =>
			val fields = lines.split(" ")
			(fields(0), fields(1).toInt)
		}
		val outPath: String = "D:\\user\\zhangjian\\output\\spark"
		// Spark refuses to write into an existing directory, so remove it first.
		import spark.util.MyPredef.delete
		outPath.delete()
		sortRDD.saveAsTextFile(outPath)
		sc.stop() // release the SparkContext before exiting
	}
}

/**
  * Demonstrates the difference between `eq` (reference equality),
  * `equals` (structural equality for case classes) and `sameElements`
  * (element-wise collection comparison).
  */
object equalsTest {
	// Explicit main instead of `extends App`: avoids the App trait's
	// delayed-initialization pitfalls for entry points.
	def main(args: Array[String]): Unit = {
		val arr1 = Array("1", "2", "p")
		val arr2 = Array("1", "2", "p")
		// Array equality is reference-based, so sameElements is required here.
		println("arr1 sameElements arr2:" + arr1.sameElements(arr2))
		// NOTE: the duplicate key "二毛" means the later (_, 21) entry
		// overwrites the earlier (_, 11) one — each map ends up with 2 entries.
		val hm1 = HashMap(("大毛", 14), ("二毛", 11), ("二毛", 21))
		val hm2 = HashMap(("大毛", 14), ("二毛", 11), ("二毛", 21))
		println("hm1 sameElements hm2:" + hm1.sameElements(hm2))
		
		val c1 = new Cat("大毛", 14)
		val c2 = new Cat("大毛", 14)
		println(c1.equals(c2)) // true: case-class equals compares fields
		println(c1.eq(c2)) // false: eq compares object references
	}
}
/**
  * Simple immutable data holder used by [[equalsTest]] to illustrate
  * structural equality of case classes.
  *
  * Marked `final`: case classes should not be extended.
  *
  * @param name the cat's name
  * @param age  the cat's age in years
  */
final case class Cat(name: String, age: Int)