package com.shujia.sql

import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}

/**
 * Spark SQL demo: reads a student CSV, counts students per class using both
 * the DSL and the SQL API, and writes the SQL result back out as CSV.
 */
object Demo1SparkSQL {
  def main(args: Array[String]): Unit = {

    // 1. Build the Spark SQL session.
    val spark: SparkSession = SparkSession
      .builder()
      //.master("local")
      .appName("sql")
      // Default is 200 shuffle partitions on a cluster; 1 keeps this small demo fast.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Ensure the session is always released, even if the job fails.
    try {

      /**
       * DataFrame: backed by an RDD underneath, but additionally carries
       * column names and column types.
       */
      // 2. Read the input data.
      val students: DataFrame = spark
        .read
        // Declare column names and types explicitly instead of inferring them.
        .schema("id STRING, name STRING, age INT, sex STRING , clazz STRING")
        .option("sep", ",") // Field separator; comma is the CSV default anyway.
        .csv("/data/students.csv") // Load CSV-formatted data.

      // 3. Process the data.

      /**
       * DSL: SQL-like method chaining.
       */
      // Count male students per class and print the result.
      students
        .where("sex = '男'")
        .groupBy("clazz")
        .count()
        .show()

      /**
       * SQL API
       */
      // Register a temporary view so the DataFrame can be queried by name.
      students.createOrReplaceTempView("students")
      // Count female students per class via a SQL statement.
      val clazzNumDF: DataFrame = spark.sql(
        """
          |select clazz,count(1) as num from
          |students
          |where sex = '女'
          |group by clazz
          |""".stripMargin)

      // Persist the result, replacing any previous output at the path.
      clazzNumDF
        .write
        .mode(SaveMode.Overwrite) // Overwrite existing output.
        .csv("/data/clazz_num")
    } finally {
      // Release cluster resources held by the session.
      spark.stop()
    }
  }
}
