package cn.doitedu.udf

import cn.doitedu.util.SparkUtil
import org.apache.spark.sql.DataFrame

/**
 * @Date 22.4.13
 * @Created by HANGGE
 * @Description Demo of Spark SQL temp views: loads a JSON and a CSV source,
 *              registers them as temporary views, joins them with SQL, and
 *              prints the extended query execution plan.
 */
object C04_Demo04 {

  /**
   * Loads two DataFrames (JSON users, CSV orders), registers them as
   * temporary views, runs a join query against them, and prints the
   * extended query plan (parsed -> analyzed -> optimized -> physical).
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val session = SparkUtil.getSession

    // Forward slashes are portable: Hadoop/Spark path handling accepts them
    // on Windows as well, whereas "\\" only works there.
    val df1: DataFrame = session.read.json("data/json/user.json")
    val df2: DataFrame = session.read
      .option("header", true)      // first CSV line holds the column names
      .option("inferSchema", true) // sample the file to infer column types
      .csv("data/csv/orders.csv")

    /**
     * Schemas of the two sources (as printed by printSchema):
     *
     * root
     * |-- age: long (nullable = true)
     * |-- gender: string (nullable = true)
     * |-- id: long (nullable = true)
     * |-- name: string (nullable = true)
     *
     * root
     * |-- oid: integer (nullable = true)
     * |-- item_name: string (nullable = true)
     * |-- money: integer (nullable = true)
     * |-- id: integer (nullable = true)
     */

    // Register both DataFrames in the SessionCatalog (source, schema, view name).
    // BUG FIX: a temporary view name must NOT be database-qualified — Spark
    // rejects "db.tb_user" — and the SQL below references plain tb_user anyway.
    df1.createOrReplaceTempView("tb_user")
    df2.createOrReplaceTempView("tb_order")

    val sql =
      """
        |select
        |t1.id as  id1 ,
        |t1.name ,
        |t2.oid ,
        |t2.item_name ,
        |t2.money
        |from
        |(select id , name , gender from tb_user where id > 2 ) t1
        |join
        |(select oid , item_name , money ,id  from tb_order where oid > 2) t2
        |on  t1.id = t2.id
        |where  t1.id  > 3
        |""".stripMargin

    // Execute the SQL statement:
    // parse -> logical plan -> metadata binding (analysis) -> optimization -> physical plan
    val frame = session.sql(sql)
    frame.explain("extended")

    // Manually produce only the UNRESOLVED logical plan (parsed, no metadata bound):
    /* val plan = session.sessionState.sqlParser.parsePlan(sql)
       println(plan) */
  }
}
