package com.spark.WorCount.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{Row, SparkSession}

object RDDToDataFrameProgramByScala {
  /**
   * Demonstrates building a DataFrame "programmatically": wrap an RDD of
   * tuples into `Row`s, pair it with a dynamically constructed `StructType`
   * schema, register it as a temp view, and query it with Spark SQL.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
    // Create the SparkSession (wraps SparkContext and SQLContext).
    val sparkSession = SparkSession.builder()
      .appName("RDDToDataFrameProgramByScala") // was "Reflect..." — fixed to match this object
      .config(conf)
      .getOrCreate()
    try {
      val sparkContext = sparkSession.sparkContext
      val rdd = sparkContext.parallelize(Array(("hello", 18), ("lz", 20), ("lwy", 19)))
      // Wrap each tuple in a Row so it can be combined with an explicit schema.
      val rddRow = rdd.map(tup => Row(tup._1, tup._2))
      // Build the DataFrame's schema (structure metadata) dynamically.
      val schema = StructType(Array(
        StructField("name", StringType, nullable = true),
        StructField("age", IntegerType, nullable = true)
      ))
      // Create the DataFrame from the RDD[Row] and the schema.
      val stuDf = sparkSession.createDataFrame(rddRow, schema)
      // createOrReplaceTempView is idempotent; createTempView throws an
      // AnalysisException if a view named "student" already exists.
      stuDf.createOrReplaceTempView("student")
      val resDf = sparkSession.sql("select name,age from student where age >18")
      // Use the typed Row accessors instead of toString/toInt round-trips.
      // (The original bound the Unit result of foreach to a val named
      // `resRdd`, which was a misleading dead binding.)
      resDf.rdd
        .map(row => (row.getString(0), row.getInt(1)))
        .collect()
        .foreach(println)
    } finally {
      // Always release Spark resources, even when the job above fails.
      sparkSession.stop()
    }
  }
}
