package cn.doitedu.rdd_ds

import cn.doitedu.beans.Student
import cn.doitedu.util.SparkUtil
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Dataset

/**
 * @Date 22.4.12
 * @Created by HANGGE
 * @Description Converts a local Seq of `Student` case-class instances into an
 *              RDD and then into a `Dataset[Student]` via both
 *              `session.createDataset(rdd)` and the implicit `rdd.toDS()`.
 */
object C01_CaseClass_2_DataSet {

  /**
   * Demo entry point: builds a local `Seq` of `Student` case-class instances,
   * parallelizes it into an `RDD[Student]`, and converts that RDD into a
   * `Dataset[Student]` in the two equivalent ways Spark offers:
   *   1. `session.createDataset(rdd)`
   *   2. `rdd.toDS()` (enabled by `import session.implicits._`)
   * Prints each Dataset's schema and contents to stdout.
   */
  def main(args: Array[String]): Unit = {

    // Sample data: plain Scala case-class instances in a local collection.
    val stu1 = Student(1, "zss", 23, "M", "Shanghai", 99.88)
    val stu2 = Student(2, "lss", 43, "F", "Shanghai", 89.88)
    val seq = Seq(stu1, stu2)

    val session = SparkUtil.getSession
    try {
      // Turn the local collection into an RDD whose element type is the case class.
      val rdd: RDD[Student] = session.sparkContext.parallelize(seq)

      // spark-sql implicits provide Encoder[Student] (and the toDS() syntax).
      import session.implicits._

      // Two equivalent RDD -> Dataset conversions.
      val ds1: Dataset[Student] = session.createDataset(rdd)
      val ds2: Dataset[Student] = rdd.toDS()

      ds1.printSchema()
      ds2.printSchema()

      ds1.show()
      ds2.show()
    } finally {
      // Fix: the session was never stopped, leaking the SparkContext.
      session.stop()
    }
  }

}
