package spark.sql

import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SparkSession}

/**
  * Demonstrates creating a DataFrame from an RDD[Row] with an explicit
  * schema, and several ways of reading individual values back out of it.
  *
  * @author com.ymy.hadoop
  * @since 2019/6/4 21:46
  */
object SparkDemo02 {

  def main(args: Array[String]): Unit = {

    // Local SparkSession using all available cores.
    val spark = SparkSession
      .builder()
      .appName("SparkDemo02")
      .master("local[*]")
      .getOrCreate()

    // Explicit schema: four non-nullable columns describing a person record.
    val schema = StructType(Array(
      StructField("name", StringType, false),
      StructField("age", IntegerType, false),
      StructField("address", StringType, false),
      StructField("birthday", DateType, false)
    ))

    // Two sample rows, spread over two partitions.
    val rowRDD = spark.sparkContext.parallelize(Seq(
      Row("zhangsan", 10, "beijing", java.sql.Date.valueOf("2008-01-01")),
      Row("lisi", 20, "shanghai", java.sql.Date.valueOf("1998-01-01"))
    ), 2)

    val rddSchemaDF = spark.createDataFrame(rowRDD, schema)

    // Ways to pull individual values out of a DataFrame:
    println(rddSchemaDF.first().get(1))  // 'age' of the first row
    println(rddSchemaDF.head(2).length)  // number of rows fetched

    // Print the 'age' column of the first two rows.
    rddSchemaDF.head(2).foreach(row => println(row.get(1)))

    spark.stop()
  }
}
