import cn.doitedu.commons.util.SparkUtil
/**
 * Row model for the demo persons CSV (all columns read as strings by the CSV source).
 *
 * @param uid user id
 * @param age age column (raw string; cast downstream)
 * @param ht  height column (raw string)
 * @param wt  weight column (raw string)
 * @param sal salary column (raw string; cast downstream)
 */
final case class Pc(uid: String, age: String, ht: String, wt: String, sal: String)

/**
 * Demo: walks a Spark SQL query through every stage of its query execution
 * (parsed/logical -> analyzed -> optimized -> physical -> executed) and prints each plan.
 *
 * Reads a headered persons CSV, casts age/sal to numeric types, registers the
 * frame as temp view "ds", then runs a window query (top-2 salaries per age,
 * filtered by height) and averages salary per age.
 */
object SchemaDemo {

  def main(args: Array[String]): Unit = {
    val spark = SparkUtil.getSparkSession("")
    import spark.implicits._

    // Source file; header row supplies the column names.
    val inputPath = "E:\\doit12_yiee\\userprofile\\data\\demo\\persons.txt"

    val frame = spark.read
      .option("header", true)
      .csv(inputPath)
      .selectExpr("uid", "cast(age as int) as age", "ht", "wt", "cast(sal as double) as sal")

    // Alternative Dataset API form of the same idea:
    //   frame.as[Pc].groupBy("age").avg("sal").where("age<40")

    frame.createTempView("ds")

    // Top 2 earners per age group (taller than 170), then average salary per age.
    val result = spark.sql(
      """
        |select
        |age,avg(sal)
        |from
        |(
        |  select
        |    uid,age,ht,wt,sal
        |  from
        |    (
        |     select
        |      uid,age,ht,wt,sal,
        |      row_number() over(partition by age order by sal) as rn
        |     from ds
        |    ) o
        |  where cast(ht as int) > 170 and rn<=2
        |) o2
        |group by age
        |
        |""".stripMargin)

    // Print one labelled stage of the query execution pipeline.
    def printStage(label: String, plan: Any): Unit = {
      println(label)
      println(plan)
    }

    val qe = result.queryExecution
    printStage("logical..................", qe.logical)
    printStage("analyzed..................", qe.analyzed)
    printStage("optimizedPlan..................", qe.optimizedPlan)
    printStage("sparkPlan..................", qe.sparkPlan)
    printStage("executedPlan..................", qe.executedPlan)

    spark.close()
  }

}
