package cn.itcast.spark.rdd

import org.apache.spark.{SparkConf, SparkContext}
import org.junit.Test

/**
 * Exploratory tests used while reading Spark source / RDD internals:
 * one for inspecting a shuffle-bearing word-count lineage, one for the
 * cartesian (narrow-dependency) operator.
 */
class SourceAnalysis {

  /** Builds a word-count pipeline and prints its RDD lineage instead of the results. */
  @Test
  def wordCount(): Unit = {
    // Local Spark context with 6 worker threads.
    val conf = new SparkConf().setMaster("local[6]").setAppName("wordCount_source")
    val sc = new SparkContext(conf)

    // Seed dataset: three short lines of text.
    val lines = sc.parallelize(Seq("hadoop spark", "hadoop flume", "spark sqoop"))

    // Pipeline: split into words -> pair each word with 1 -> sum per key -> render as text.
    val words = lines.flatMap(_.split(" "))
    val pairs = words.map(word => (word, 1))
    val counts = pairs.reduceByKey(_ + _)
    val rendered = counts.map { case (word, count) => s"$word, $count" }

    // Dump the lineage (debug DAG) rather than collecting the output.
    // (To see the actual counts: rendered.collect().foreach(println))
    println(rendered.toDebugString)

    // Release the context so the next test can create its own.
    sc.stop()
  }

  /** Computes the cartesian product of two RDDs and prints every pairing. */
  @Test
  def narrowDependency(): Unit = {
    // Local Spark context with 6 worker threads.
    val conf = new SparkConf().setMaster("local[6]").setAppName("cartesian")
    val sc = new SparkContext(conf)

    val numbers = sc.parallelize(Seq(1, 2, 3, 4, 5, 6))
    val letters = sc.parallelize(Seq("a", "b", "c"))

    // Every (number, letter) combination: 6 x 3 = 18 pairs.
    val product = numbers.cartesian(letters)

    product.collect().foreach(println)

    sc.stop()
  }

}





















