package com.shujia.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * Batch job intended for `spark-submit`: reads the student CSV from HDFS,
 * counts the number of students per class, and writes the result back to
 * HDFS as tab-separated CSV.
 *
 * Note: no `.master(...)` is set on purpose — the master is supplied by
 * `spark-submit` on the command line.
 */
object Demo6Submit {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("submit")
      // Small data set — 2 shuffle partitions avoids the 200-partition default.
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Read the student file from HDFS with an explicit schema
    // (no header row in the data, so the schema must be supplied).
    val student: DataFrame = spark.read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING , name STRING ,age INT ,gender STRING ,clazz STRING")
      .load("/data/student")

    // Number of students per class.
    val clazzNum: DataFrame = student
      .groupBy($"clazz")
      .agg(count($"clazz") as "num")

    // Save the result as tab-separated CSV, overwriting any previous run's output.
    clazzNum.write
      .format("csv")
      .option("sep", "\t")
      .mode(SaveMode.Overwrite)
      .save("/data/clazz_num")

    // Release the session's resources and let the application exit cleanly.
    spark.stop()
  }

}
