package io.sqrtqiezi.spark.rdd

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

import scala.util.Random

/**
 * Demonstrates equivalent aggregations across the three Spark APIs:
 * raw RDD operations (cogroup / join / countByKey / groupByKey),
 * the DataFrame API, and Spark SQL.
 *
 * Run locally; the embedded master is `local`.
 */
object RDDSample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local")
      .appName("simple rdd sample")
      .getOrCreate()

    try {
      // Source data: (word, count) pairs spread over 10 partitions.
      val rdd1 = spark.sparkContext.parallelize(
        Seq(("spark", 12), ("hadoop", 26), ("hadoop", 23), ("scala", 24)), 10)
      println(rdd1.getNumPartitions)

      val words = rdd1.keys.distinct

      // Each charRdd pairs every word with a random double.
      // IMPORTANT: without cache(), an RDD is recomputed on every action,
      // so each collect() would see *different* random values and the
      // "join" and "cogroup & flatMap as join" demos below would not
      // actually show the same data. cache() pins one evaluation.
      // Random (the companion object) avoids allocating a new generator
      // per element.
      val charRdd = words.map(w => (w, Random.nextDouble())).cache()
      val charRdd2 = words.map(w => (w, Random.nextDouble())).cache()
      val charRdd3 = words.map(w => (w, Random.nextDouble())).cache()

      println("cogroup sample:")
      charRdd.cogroup(charRdd2, charRdd3).collect().foreach(println)

      println("join sample:")
      charRdd.join(charRdd2).collect().foreach(println)

      // join(a, b) is equivalent to cogroup(a, b) followed by a cartesian
      // product of the grouped values per key.
      println("cogroup & flatMap as join")
      charRdd.cogroup(charRdd2)
        .flatMapValues { case (left, right) =>
          for (v <- left; w <- right) yield (v, w)
        }
        .collect().foreach(println)

      println("select a, count(b) from t where c > 12 group by a")
      rdd1.collect().foreach(println)

      println("---------------------rdd-----------------------------")
      // Variant 1: countByKey (returns a local Map on the driver).
      rdd1.filter { case (_, value) => value > 12 }
        .countByKey()
        .foreach(println)

      println("---------------------rdd-----------------------------")
      // Variant 2: groupByKey + size (computed distributed, then collected).
      rdd1.filter { case (_, value) => value > 12 }
        .groupByKey()
        .mapValues(_.size)
        .collect()
        .foreach(println)

      println("---------------------dataframe-----------------------------")
      val df = spark.createDataFrame(rdd1).toDF("name", "value")
      df.show()

      df.where(col("value") > 12)
        .groupBy(col("name"))
        .agg(expr("count(value)"))
        .show()

      println("---------------------sql-----------------------------")
      df.createOrReplaceTempView("myTable")

      spark.sql("select name, count(value) from myTable group by name")
        .show()
    } finally {
      // Always release the local Spark context, even if a stage fails.
      spark.stop()
    }
  }
}
