package com.shujia.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
  * Batch job intended for cluster submission: reads a student CSV from HDFS,
  * counts students per gender, and writes the result back to HDFS as TSV.
  */
object Demo8Submit {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      // .master("local") // not set: the master is supplied by spark-submit on the cluster
      .appName("submit")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Read the students file from HDFS with an explicit schema
    // (avoids a schema-inference pass and keeps column types stable).
    val student: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .load("/data/students.txt")

    // Count the number of students per gender.
    val genderNum: DataFrame = student
      .groupBy($"gender")
      .agg(count($"gender") as "c")

    // Save the result as tab-separated files, overwriting any previous run
    // so the job is safely re-runnable.
    genderNum
      .write
      .format("csv")
      .option("sep", "\t")
      .mode(SaveMode.Overwrite)
      .save("/data/gender_num")

    // Release cluster resources explicitly once the job is done.
    spark.stop()

    /**
      * Example submission (note: `--master yarn-client` is deprecated; use
      * `--master yarn --deploy-mode client` instead):
      *
      * spark-submit --master yarn --deploy-mode client \
      *   --class com.shujia.sql.Demo8Submit \
      *   --executor-memory 1G --executor-cores 1 --num-executors 1 \
      *   --conf spark.sql.shuffle.partitions=1 \
      *   spark-1.0.jar
      */
  }

}
