package com.shujia.sql

import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo6Submit {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("submit")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    /**
      * 读取hdfs中的数据
      *
      */

    val student: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING, age INT, gender STriNG ,clazz STRING")
      .load("/data/student/")


    //处理数据
    val clazzNum: DataFrame = student
      .groupBy($"clazz")
      .agg(count($"id") as "sum_num")


    /**
      * 保存数
      *
      */

    clazzNum
      .write
      .format("csv")
      .option("sep", ",")
      .save("/data/clazz_num")


    /**
      * 提交任务
      * spark-submit --master yarn-client --num-executors 1 --executor-cores 1 --executor-memory 1G --class com.shujia.sql.D
      * emo6Submit Spark-1.0.jar
      *
      *
      * 查看结果
      * hadoop dfs -cat /data/clazz_num/
      */
  }

}
