package com.hliushi.spark.rdd

import org.apache.spark.{SparkConf, SparkContext}
import org.junit.Test

/**
 * Exploration of Spark RDD source behavior: prints the lineage (toDebugString)
 * of a word-count job and demonstrates a narrow dependency via a cartesian product.
 *
 * author: Hliushi
 * date: 2021/5/16 9:23
 */
class SourceAnalysis {

  // Local mode with 6 worker threads.
  val conf: SparkConf = new SparkConf().setMaster("local[6]").setAppName("source_analysis")

  // One context per test instance (JUnit 4 creates a fresh instance per test method).
  val sc = new SparkContext(conf)

  @Test
  def wordCount(): Unit = {
    // Build the source dataset.
    // textFile internally creates a HadoopRDD[K, V] but ultimately returns a MapPartitionsRDD.
    val lines = sc.textFile("dataset/wordcount.txt")

    // Split each line into words, seed every word with a count of 1,
    // aggregate counts per word, then order by frequency (descending).
    val counts = lines
      .flatMap(_.split(","))
      .map((_, 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)

    // Render each (word, count) pair as a human-readable line.
    val rendered = counts.map { case (word, count) => s"$word, $count" }

    // Inspect the result: print the RDD lineage rather than collecting the data.
    //rendered.collect().foreach(println(_))
    println("strRDD.toDebugString = ")
    println(rendered.toDebugString)

    // Stop the context and release resources.
    sc.stop()
  }


  /**
   * narrowDependency — demonstrates a narrow dependency using a cartesian product.
   */
  @Test
  def narrowDependency(): Unit = {
    // Goal: compute the cartesian product of two RDDs.
    val numbers = sc.parallelize(1 to 5)
    val letters = sc.parallelize('a' to 'c')

    // Every (number, letter) pair.
    val product = numbers.cartesian(letters)

    // Materialize and print the pairs.
    product.collect().foreach(println)

    sc.stop()
  }
}