package com.imooc.spark

import org.apache.spark.sql.SparkSession

/**
  * DataFrame API 基本操作
  */
object DataFrameApp {

  /** Default input file, used when no path is supplied on the command line. */
  private val DefaultPeopleJson =
    "D:\\development\\spark-2.4.4-bin-hadoop2.6\\examples\\src\\main\\resources\\people.json"

  /**
    * Entry point: loads a JSON file into a DataFrame and demonstrates
    * basic DataFrame API operations (schema, show, select, filter, groupBy).
    *
    * @param args optional; args(0) may override the path to people.json
    */
  def main(args: Array[String]): Unit = {
    val sparkSession = SparkSession.builder()
      .master("local[2]")
      .appName("DataFrameApp")
      .getOrCreate()

    // Allow the input path to be passed as the first program argument;
    // fall back to the original hard-coded example path for compatibility.
    val peopleJsonPath = args.headOption.getOrElse(DefaultPeopleJson)

    try {
      // Load the JSON file into a DataFrame.
      val peopleDF = sparkSession.read.format("json").load(peopleJsonPath)

      // Print the inferred schema of the DataFrame.
      peopleDF.printSchema()

      // Show the first 20 rows of the dataset.
      peopleDF.show()

      // Project a single column: SELECT name FROM table
      peopleDF.select("name").show()
      // Project with a column expression: age + 10
      peopleDF.select(peopleDF.col("name"), peopleDF.col("age") + 10).show()

      // Filter rows by a column predicate: WHERE age > 19
      peopleDF.filter(peopleDF.col("age") > 19).show()

      // Group by a column and aggregate: GROUP BY age with COUNT(*)
      peopleDF.groupBy("age").count().show()
    } finally {
      // Always release the SparkSession, even if an action above fails.
      sparkSession.stop()
    }
  }
}
