package cn.doitedu.rdd_ds

import cn.doitedu.beans.JavaStudent
import cn.doitedu.util.SparkUtil
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Encoders, Row}

/**
 * @Date 22.4.12
 * @Created by HANGGE
 * @Description Demonstrates the conversions between the three Spark
 *              abstractions: RDD <-> Dataset <-> DataFrame, for both
 *              Scala collection elements (Map) and Java bean elements.
 */
object C04_DFDSRDD {
  def main(args: Array[String]): Unit = {

    val session = SparkUtil.getSession
    val sc = session.sparkContext
    // Needed for rdd.toDS() / implicit Encoders of standard Scala types.
    import session.implicits._

    // A local Scala collection of Maps -> RDD.
    val ls = List(
      Map[String, String]("id" -> "uid001", "name" -> "zeze"),
      Map[String, String]("id" -> "uid002", "name" -> "yuhan")
    )
    val rdd: RDD[Map[String, String]] = sc.parallelize(ls)

    println("==========================")

    // A local collection of Java beans -> RDD.
    // NOTE: JavaStudent is a Java bean, NOT a Scala case class, so Spark
    // cannot derive an implicit Encoder for it; we must supply one (Kryo).
    val stu1 = new JavaStudent(1, "zss", 23, "M", "Shanghai", 99.88)
    val stu2 = new JavaStudent(2, "lss", 43, "F", "Shanghai", 89.88)
    val seq = Seq(stu1, stu2)
    val rddx: RDD[JavaStudent] = sc.parallelize(seq)

    // Explicit Kryo-based encoder for the Java bean type.
    val encoder = Encoders.kryo(classOf[JavaStudent])

    // RDD -> Dataset / DataFrame.
    val ds = rdd.toDS()
    val df = ds.toDF()
    val ds2 = session.createDataset(rddx)(encoder)

    // Dataset -> RDD: keeps the element type (typed API).
    val rdd1: RDD[Map[String, String]] = ds.rdd
    val rdd2: RDD[JavaStudent] = ds2.rdd

    // DataFrame -> RDD: elements are generic Rows (untyped API).
    val rdd3: RDD[Row] = df.rdd
    val rdd4: RDD[Row] = ds2.toDF().rdd

    // Release cluster resources before exiting.
    session.close()
  }
}
