package com.shujia.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * Example Spark SQL job intended for submission to a YARN cluster.
 *
 * Reads a student CSV dataset from HDFS, counts rows per gender, and writes
 * the result back to HDFS as CSV. Run via spark-submit; do not set a local
 * master in code when submitting to the cluster.
 */
object Demo8SubmitYarn {
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession
      .builder()
      // When submitting to a cluster (yarn), do NOT set .master("local") here;
      // the master is supplied by spark-submit.
      //      .master("local")
      .appName("spark sql yarn submit")
      // Config precedence: code > spark-submit command-line args > config file.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import sparkSession.implicits._
    import org.apache.spark.sql.functions._

    try {
      // When submitted via YARN, a bare path like this resolves against HDFS
      // (the cluster's default filesystem), not the local filesystem.
      val studentsDF: DataFrame = sparkSession.read
        .format("csv")
        .option("sep", ",")
        .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
        .load("/bigdata29/spark_in/data/student")

      // NOTE(review): count($"gender") counts non-null gender values within each
      // group, which equals the row count only when gender is never null — confirm
      // the data has no null genders, otherwise count(lit(1)) may be intended.
      val genderCountsDF: DataFrame = studentsDF
        .groupBy($"gender")
        .agg(count($"gender") as "counts")

      // Write the aggregated result to HDFS, replacing any previous output.
      genderCountsDF.write
        .format("csv")
        .option("sep", ",")
        .mode(SaveMode.Overwrite)
        .save("/bigdata29/spark_out/out2")
    } finally {
      // Release executors and cluster resources even if the job fails.
      sparkSession.stop()
    }
  }

}
