package com.xiaoxu.spark.DateFrameWithDataset

import org.apache.spark.sql.SparkSession

object DataFrameDemo {

  /**
   * Demonstrates basic DataFrame operations (schema inspection, projection,
   * filtering, grouping) over a JSON file using the Spark SQL API.
   *
   * @param args optional; args(0) may supply the input JSON path.
   *             Defaults to the original demo path when absent.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("DataFrameDemo")
      .master("local[2]")
      .getOrCreate()

    // Allow the input path to be overridden from the command line while
    // keeping the original hard-coded path as a backward-compatible default.
    val inputPath = args.headOption.getOrElse("D:/test/data/people.json")

    // Ensure the SparkSession is stopped even if any operation below fails.
    try {
      // Load the JSON file into a DataFrame.
      // read.json(path) is the idiomatic shorthand for format("json").load(path).
      val dataFrame = spark.read.json(inputPath)

      // Print the DataFrame's schema (column names, types, nullability).
      dataFrame.printSchema()
      // Show the first 20 rows of the dataset.
      dataFrame.show()
      // Select a single column: SELECT name FROM tableName
      dataFrame.select("name").show()
      // Select several columns, applying an expression to one of them:
      // SELECT name, age + 10 AS ageAdd FROM tableName
      dataFrame.select(
        dataFrame.col("name"),
        (dataFrame.col("age") + 10).as("ageAdd")
      ).show()
      // Filter rows on a column value: SELECT * FROM tableName WHERE age > 10
      dataFrame.filter(dataFrame.col("age") > 10).show()
      // Group by a column and aggregate:
      // SELECT age, COUNT(1) FROM tableName GROUP BY age
      dataFrame.groupBy("age").count().show()
    } finally {
      spark.stop()
    }
  }

}
