package com.shujia.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo9ClusterRun {

  /**
   * Spark SQL batch job intended for cluster execution via spark-submit
   * (no `.master()` is set here on purpose — the cluster manager supplies it).
   *
   * Reads student records from HDFS (`/data/students.txt`, comma-separated,
   * no header), counts students per gender with Spark SQL, and writes the
   * result back to HDFS as CSV under `/data/genderNum`.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      // Small demo dataset: keep shuffle parallelism low to avoid tiny tasks.
      .config("spark.sql.shuffle.partitions", "2")
      .appName("spark")
      .getOrCreate()

    try {
      // Read the student data from HDFS with an explicit schema
      // (CSV has no header row, so column names must be declared here).
      val df: DataFrame = spark.read
        .format("csv")
        .schema("id STRING, name STRING, age INT, gender STRING, clazz STRING")
        .load("/data/students.txt")

      // Register a temporary view so the data can be queried with SQL.
      df.createOrReplaceTempView("student")

      // Count students per gender; alias the aggregate so the result
      // schema has a stable, readable column name.
      val resultDF: DataFrame = spark.sql(
        """
          |select gender, count(*) as num
          |from student
          |group by gender
        """.stripMargin)

      // Write the result to HDFS, overwriting any previous run's output.
      resultDF.write
        .format("csv")
        .option("sep", ",")
        .mode(SaveMode.Overwrite)
        .save("/data/genderNum")
    } finally {
      // Release executors and cluster resources even if the job fails.
      spark.stop()
    }
  }
}
