import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Minimal Spark pair-RDD demo: for each key, computes the pair
 * (sum of values, number of occurrences) and prints the results.
 *
 * Runs entirely on a local master (`local[4]`), so it needs no cluster.
 */
object ScalaWordCount {
  def main(args: Array[String]): Unit = {
    // Spark configuration: application name plus a local master
    // using 4 worker threads.
    val sparkConf = new SparkConf()
      .setAppName("ScalaWordCount")
      .setMaster("local[4]")

    // Entry point for all Spark operations.
    val sparkContext = new SparkContext(sparkConf)
    sparkContext.setLogLevel("ERROR")

    // Sample key/value data; key 3 occurs twice (values 4 and 6).
    val pairs = sparkContext.parallelize(List((1, 2), (3, 4), (3, 6)))

    // NOTE(review): `other` is not used by the live code below — it was
    // only referenced by earlier join/cogroup experiments. Kept to leave
    // runtime behavior unchanged.
    val other = sparkContext.parallelize(List((3, 9)))

    // Per key, build (sum, count): tag every value with a count of 1,
    // then merge the tagged pairs component-wise.
    val sumAndCountByKey = pairs
      .mapValues(value => (value, 1))
      .reduceByKey { case ((sumA, countA), (sumB, countB)) =>
        (sumA + sumB, countA + countB)
      }

    sumAndCountByKey.collect.foreach(print)

    // Release Spark resources.
    sparkContext.stop()
  }
}
