package spark.sql

import org.apache.spark.sql.SparkSession

/**
  * Demonstrates building Spark DataFrames from Scala collections:
  * tuples via toDF() and case classes with renamed columns.
  *
  * @author com.ymy.hadoop
  * @since 2019/6/4 21:29
  */
object SparkDemo01 {

  /** Sample record used to derive a DataFrame schema from case-class fields. */
  case class Person(name: String, age: Int, address: String, date: java.sql.Date) extends Serializable

  /** Entry point: builds a local SparkSession and shows collection-to-DataFrame conversions. */
  def main(args: Array[String]): Unit = {

    // Local-mode session using all available cores.
    val spark = SparkSession
      .builder()
      .appName("SparkDemo01")
      .master("local[*]")
      .getOrCreate()

    // Brings in toDF() and other implicit conversions for Scala collections.
    import spark.implicits._

    val tupleRows = Seq(
      ("zhangsan", 10, "beijing", java.sql.Date.valueOf("2008-01-01")),
      ("lisi", 20, "shanghai", java.sql.Date.valueOf("1998-01-01"))
    )
    // Calling toDF() with no arguments would name the columns _1, _2, _3, _4.
//    val df = tupleRows.toDF("name","age","address","birthday")
//    df.show()
//    df.printSchema()

//    val rdd = spark.sparkContext.parallelize(tupleRows,2)
//    val rddToDF = rdd.toDF()
//    rddToDF.printSchema()
//    rddToDF.show(false)

    val people = Seq(
      Person("zhangsan", 10, "beijing", java.sql.Date.valueOf("2008-01-01")),
      Person("lisi", 20, "shanghai", java.sql.Date.valueOf("1998-01-01"))
    )

    // toDF with explicit names overrides the case-class field names in the schema.
    val peopleDf = people.toDF("x", "y", "z", "d")
    peopleDf.show(false)
    peopleDf.printSchema()

    spark.stop()
  }
}
