package cn.doitedu.rdd_ds

import cn.doitedu.beans.{JavaStudent, Student}
import cn.doitedu.util.SparkUtil
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, Encoders}

/**
 * @Date 22.4.12
 * @Created by HANGGE
 * @Description
 */
/**
 * Demonstrates turning an RDD of plain Java beans into a Dataset.
 *
 * A Java bean is not a Scala case class, so spark-sql cannot derive a
 * schema-based Encoder for it automatically; a Kryo encoder is used
 * instead, and the structure is recovered by an explicit projection.
 */
object C02_JavaBean_2_DataSet {
  def main(args: Array[String]): Unit = {

    // Build a small local collection of Java-bean students.
    val stu1 = new JavaStudent(1, "zss", 23, "M", "Shanghai", 99.88)
    val stu2 = new JavaStudent(2, "lss", 43, "F", "Shanghai", 89.88)
    val seq = Seq(stu1, stu2)

    val session = SparkUtil.getSession
    try {
      // Parallelize the local collection into an RDD of Java beans.
      val rdd: RDD[JavaStudent] = session.sparkContext.parallelize(seq)

      // Bring in spark-sql implicits: provides Encoders for case classes,
      // tuples and primitives (needed by the tuple-typed map below).
      import session.implicits._

      // Fall back to a Kryo encoder for the arbitrary serializable Java
      // class; the Dataset's underlying rows are opaque binary blobs.
      val encoder = Encoders.kryo(classOf[JavaStudent])
      val ds1: Dataset[JavaStudent] = session.createDataset(rdd)(encoder)

      // Manually project the opaque beans into a tuple Dataset so the
      // result carries a real, queryable schema again.
      val ds2: Dataset[(Int, Int, String)] = ds1.map(stu => {
        (stu.getId, stu.getAge, stu.getCity)
      })

      val df = ds2.toDF("id", "age", "city")

      df.printSchema()
      df.show(false)
    } finally {
      // Always release the SparkSession; the original leaked it on exit.
      session.stop()
    }
  }

}
