package com.spark.WorCount.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object RDDToDataFrameReflectByScala {

  /** Demonstrates converting an RDD to a DataFrame via reflection on a case
    * class, querying it with Spark SQL, and mapping the result Rows back to
    * Student instances printed to the console.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
    // SparkSession wraps both the SparkContext and the SQLContext.
    val sparkSession = SparkSession.builder()
      .appName("RDDToDataFrameReflectByScala")
      .config(conf)
      .getOrCreate()
    try {
      val sparkContext = sparkSession.sparkContext
      val rdd = sparkContext.parallelize(Array(("hello", 18), ("lz", 20), ("lwy", 19)))
      // The implicit conversions (e.g. .toDF()) require this import.
      import sparkSession.implicits._
      val rddDf = rdd.map { case (name, age) => Student(name, age) }.toDF()
      // createOrReplaceTempView is idempotent; createTempView would throw an
      // AnalysisException if a view named "student" already existed.
      rddDf.createOrReplaceTempView("student")
      val result = sparkSession.sql("select name,age from student where age>18")
      val resultRdd = result.rdd
      // Rebuild Student from each Row. Prefer the type-safe, name-based
      // Row.getAs[T](column) over positional access like record(0).toString.
      resultRdd
        .map(record => Student(record.getAs[String]("name"), record.getAs[Int]("age")))
        .collect()
        .foreach(println)
    } finally {
      // Release Spark resources even if the job above throws.
      sparkSession.stop()
    }
  }
}
case class Student(name:String,age:Int)