package spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Small Spark driver programs illustrating RDD operations
  * (map, union, groupByKey) against a local master.
  */
object DependencyTest {
	/**
	  * Parallelizes a local Int array, doubles each element with `map`,
	  * then collects and prints the results on the driver.
	  */
	def main(args: Array[String]): Unit = {
		val conf: SparkConf = new SparkConf()
		conf.setMaster("local")
		conf.setAppName("Dependency")
		val sc: SparkContext = new SparkContext(conf)
		try {
			val nums: Array[Int] = Array(100, 80, 70)
			val numRdd: RDD[Int] = sc.parallelize(nums)
			// map is a transformation; nothing runs until collect() below.
			val doubled: RDD[Int] = numRdd.map(_ * 2)
			doubled.collect().foreach(println)
		} finally {
			// Always release the SparkContext, even if the job throws.
			sc.stop()
		}
	}

}
object DependencyTest1 {
	/**
	  * Builds two RDDs from local String arrays, unions them, and prints
	  * every element. `union` concatenates the RDDs without deduplication;
	  * elements of the first RDD appear before those of the second.
	  */
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf()
		conf.setMaster("local")
		conf.setAppName("T2")
		val sc = new SparkContext(conf)
		try {
			// Source data for the first RDD (strings intentionally contain spaces).
			val data1 = Array("s p ark", "sca la", "h ado op")
			// Source data for the second RDD.
			val data2 = Array("SPARK", "SCALA", "HADOOP")
			// Turn array 1 into RDD1.
			val rdd1: RDD[String] = sc.parallelize(data1)
			// Turn array 2 into RDD2.
			// NOTE(review): the original also built `rdd1.map(_.split(" "))` but never
			// used the result — dead code, removed.
			val rdd2: RDD[String] = sc.parallelize(data2)
			// Union RDD1 with RDD2.
			val unionRdd = rdd1.union(rdd2)
			// Collect to the driver and print.
			unionRdd.collect().foreach(println)
		} finally {
			// Release the SparkContext even on failure.
			sc.stop()
		}
	}
}
object DependencyTest2 {
	/**
	  * Parallelizes (key, score) pairs, groups the values by key with
	  * `groupByKey` (a shuffle operation), and prints each
	  * (key, Iterable[values]) pair on the driver.
	  */
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf()
		conf.setMaster("local")
		conf.setAppName("T2")
		val sc = new SparkContext(conf)
		try {
			// Input pairs; duplicate keys ("spark", "hadoop") are grouped below.
			val data = Array(("spark", 100), ("spark", 95), ("hadoop", 99), ("hadoop", 80), ("scala", 75))
			// Turn the array into an RDD of key/value pairs.
			val rdd = sc.parallelize(data)
			// NOTE(review): the original called rdd.repartition(3) and rdd.coalesce(2)
			// and discarded the results. RDD transformations return a NEW RDD and
			// never mutate the receiver, so both calls were no-ops — removed.
			// Group values by key.
			val rddGrouped: RDD[(String, Iterable[Int])] = rdd.groupByKey()
			// Collect and print the grouped result.
			rddGrouped.collect().foreach(println)
		} finally {
			// Release the SparkContext even on failure.
			sc.stop()
		}
	}
}