package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo7DFCache {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("submit")
      // A single shuffle partition keeps local runs fast on tiny demo data.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // 1. Read the student data (CSV on disk) with an explicit schema so no
    //    extra pass is needed for schema inference.
    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING ,name STRING ,age INT , gender STRING ,clazz STRING")
      .load("data/students.txt")

    /**
      * Cache the DataFrame because several independent actions below reuse it;
      * without the cache every action would re-read and re-parse the file.
      */
    studentDF.cache()

    // Count students per class — the first action materializes the cache.
    studentDF
      .groupBy($"clazz")
      .agg(count($"clazz") as "c")
      .show()

    // Count students per gender — this action is served from the cache.
    studentDF
      .groupBy($"gender")
      .agg(count($"gender") as "c")
      .show()

    val scoreDF: DataFrame = spark.read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING , cid STRING ,sco INT")
      .load("data/score.txt")

    /**
      * Map-side join: hint that the small (student) table should be broadcast
      * to every executor so joining with the large score table needs no shuffle.
      */
    studentDF
      // Broadcast the small table
      .hint("broadcast")
      .join(scoreDF, "id")
      .show()

    // Release the cached blocks once the DataFrame is no longer needed.
    studentDF.unpersist()

    // Keep the JVM alive so the Spark web UI (default http://localhost:4040)
    // can be inspected. Sleeping blocks indefinitely without spinning a CPU
    // core, unlike the original busy-wait `while (true) {}`.
    Thread.sleep(Long.MaxValue)

  }

}
