package com.scala.spark

import org.apache.spark.sql.SparkSession

object BasicOperation {
    /** Schema for the JSON records read from the "employee" path. */
    case class Employee(name: String, age: Long, depId: Long, gender: String, salary: Long)

    /**
     * Demo of basic Spark SQL operations: reading JSON into a DataFrame,
     * querying it via a temp view, inspecting the execution plan, and
     * converting between DataFrame and a typed Dataset.
     */
    def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().appName("action").master("local").getOrCreate()
        import spark.implicits._
        // Ensure the session is stopped even if an action below throws,
        // so driver resources are always released.
        try {
            val employee = spark.read.json("employee")

            // Step 1: cache() avoids recomputing the DataFrame across repeated actions.
            /*  employee.cache()
            println(employee.count())
            println(employee.count())*/

            // Register a temporary view so SQL statements can be run directly against the data.
            employee.createOrReplaceTempView("temp")
            spark.sql("select  * from temp where age>25").show()

            // Inspect the SQL execution plan.
            // A DataFrame/Dataset (e.g. one produced by a SQL query) internally carries a
            // logical plan. At execution time the Catalyst optimizer generates a physical
            // plan, applying optimizations such as filter pushdown, and whole-stage code
            // generation emits specialized code to improve runtime performance.
            spark.sql("select  * from temp where age>25").explain()
            employee.printSchema()

            // Convert the untyped DataFrame to a strongly typed Dataset[Employee] and back.
            val employDataSet = employee.as[Employee]
            employDataSet.show()
            employDataSet.printSchema()
            val frame = employDataSet.toDF()
            frame.show()
            frame.printSchema()
        } finally {
            spark.stop()
        }
    }
}
