package doit20.sparksql

import org.apache.spark.sql.catalog.Table
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.{QueryExecution, SparkPlan}
import org.apache.spark.sql.{Dataset, SparkSession}

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-04-12
 * @desc 理解sparksql的sql解析及查询计划
 */
/**
 * Demo walking through Spark SQL's query-compilation pipeline:
 * SQL text -> unresolved AST (parser) -> analyzed plan (analyzer, metadata
 * binding) -> optimized logical plan (RBO) -> physical SparkPlan.
 */
object Demo16 {

  def main(args: Array[String]): Unit = {

    // Local, single-threaded session for experimentation.
    val session = SparkSession
      .builder()
      .appName("")
      .master("local")
      .getOrCreate()

    // Register a CSV-backed temp view so the catalog has something to list.
    session.read.option("header", true).csv("data/stu2.txt").createTempView("df")

    /**
     * The SparkSession component that manages metadata is the catalog.
     */
    val registeredTables: Dataset[Table] = session.catalog.listTables()
    //val tables2 = session.sessionState.catalog.listTables("default")
    registeredTables.show()


    /**
     * SQL parsing lives in the session's sessionState, as its SqlParser.
     */
    val sessionState = session.sessionState
    // sessionState.catalog — the state object also holds the catalog module.
    val sqlParser = sessionState.sqlParser // parses SQL text into an AST
    val unresolvedPlan: LogicalPlan = sqlParser.parsePlan("select ip,uid,url,timestamp from app_log")
    println(unresolvedPlan)
    /**
     * 'Project ['ip, 'uid, 'url, 'timestamp]
     * +- 'UnresolvedRelation [app_log], [], false
     */


    val joinSql =
      """
        |
        |select
        |  b.id,
        |  b.name,
        |  b.age,
        |  b.sex,
        |  a.score
        |from
        |    (
        |       select sex,avg(score)  from  t1 group by sex
        |	)  a
        |
        |join
        |    (
        |	  select id,name,age,sex from t2 where id>100
        |	)  b
        |
        |where score>20
        |
        |""".stripMargin
    println(sqlParser.parsePlan(joinSql))

    /**
     * The parse result is a syntax tree: an (incomplete/unresolved) LogicalPlan.
     * 'Project ['b.id, 'b.name, 'b.age, 'b.sex, 'a.score]
     * +- 'Filter ('score > 20)
     * +- 'Join Inner
     * :- 'SubqueryAlias a
     * :  +- 'Aggregate ['sex], ['sex, unresolvedalias('avg('score), None)]
     * :     +- 'UnresolvedRelation [t1], [], false
     * +- 'SubqueryAlias b
     * +- 'Project ['id, 'name, 'age, 'sex]
     * +- 'Filter ('id > 100)
     * +- 'UnresolvedRelation [t2], [], false
     */


    /**
     * Now use real tables and a real query to show the full plan-generation flow.
     */
    session.read.option("header", true).csv("data/stu2.txt").createTempView("t1")
    session.read.option("header", true).csv("data/stu_other.csv").createTempView("t2")

    val realSql =
      """
        |
        |select
        |  a.id,
        |  a.gender,
        |  a.avg_age
        |from
        |   (
        |     select
        |       id,gender,avg(age) as avg_age
        |     from
        |     (
        |       select id,name,age,score,gender from t1 where age > 15+15
        |     ) tmp
        |     group by id,gender
        |   ) a
        |join
        |   (
        |     select id,department from t2 where id <100  and id < 50
        |   ) b
        |on a.id = b.id
        |where a.id>10
        |
        |""".stripMargin

    /**
     * The session's analyzer resolves the AST against catalog metadata.
     */
    val ast = session.sessionState.sqlParser.parsePlan(realSql)
    val analyzer = session.sessionState.analyzer
    // The analyzer really runs a whole chain of rules; invoking it directly
    // like this is inconvenient and produces no visible result here.
    val manuallyAnalyzed: LogicalPlan = analyzer.execute(ast)

    // The high-level entry point executePlan() takes the AST through
    // analysis, optimization and physical planning in one go.
    val execution: QueryExecution = session.sessionState.executePlan(ast)

    // Logical plan after metadata resolution/binding.
    val analyzedPlan: LogicalPlan = execution.analyzed

    // Logical plan after rule-based optimization (RBO).
    val optimizedPlan: LogicalPlan = execution.optimizedPlan

    // Physical execution plan (cost-based selection, CBO).
    val physicalPlan: SparkPlan = execution.sparkPlan


    println("------ast logicplan-----")
    println(ast)

    println("------analyzedLogicPlan-----")
    println(analyzedPlan)


    println("------optimizedLogicPlan-----")
    println(optimizedPlan)

    println("------sparkPlan-----")
    println(physicalPlan)


    session.sql(realSql).show(100,false)

    session.close()
  }

}
