package doit20.sparksql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Encoders, Row, SparkSession}

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-04-09
 * @desc Converting an RDD[T] into a Dataset (or DataFrame)
 */

/**
 * Sample record used by the RDD-to-Dataset demos: when encoded with
 * Encoders.product, the field names become column names and the field
 * types become column types. Marked final — case classes should not be extended.
 */
final case class Person(id: Int, name: String, age: Int, salary: Float)

object Demo3 {

  def main(args: Array[String]): Unit = {

    val session = SparkSession.builder()
      .appName("rdd-to-dataset-demo") // FIX: app name was an empty string
      .master("local")
      .getOrCreate()

    // Uncomment exactly one demo at a time — each demo closes the session itself.
    // rddPrimitive2DsV2(session)
    // rddCaseClass2Ds(session)
    // rddJavaBean2Dataset(session)
    // rddTuple2Dataset(session)
    // rddScalaBean2Dataset(session)
    rddMap2Dataset(session)
  }


  /**
   * The concise (and most commonly used) way to turn an RDD into a DataFrame:
   * import the session's implicits and call toDF directly on the RDD.
   *
   * @param session the active SparkSession; closed before returning
   */
  def rddPrimitive2DsV2(session: SparkSession): Unit = {
    val sparkContext = session.sparkContext
    val rdd: RDD[Int] = sparkContext.parallelize(Seq(1, 2, 3, 4, 5))

    // toDF is available on the RDD because of the implicits imported below:
    // rddToDatasetHolder turns the RDD into a Dataset holder, and
    // newIntEncoder supplies the Encoder[Int] that conversion needs.
    //import session.implicits.rddToDatasetHolder
    //import session.implicits.newIntEncoder
    import session.implicits._
    val frame = rdd.toDF("nb")
    frame.show()

    session.close()
  }


  /**
   * The fully spelled-out flow of converting an RDD to a Dataset and then to a DataFrame.
   *
   * @param session the active SparkSession; closed before returning
   */
  def rddPrimitive2Ds(session: SparkSession): Unit = {

    // Use the underlying SparkContext to build an RDD.
    val sparkContext = session.sparkContext
    val rdd1: RDD[Int] = sparkContext.parallelize(Seq(1, 2, 3, 4, 5))

    // Convert RDD[Int] into Dataset[Int] with an explicit Encoder;
    // from here on the data carries a schema.
    // (FIX: removed an unused `import session.implicits._` — the encoder is passed explicitly.)
    val ds: Dataset[Int] = session.createDataset(rdd1)(Encoders.scalaInt)
    ds.printSchema()
    ds.show()

    // Dataset[Int] -> Dataset[Row] (a DataFrame), naming the single column "nb".
    val df = ds.toDF("nb")
    df.printSchema()
    df.show()

    session.close()
  }

  /**
   * Convert an RDD of case-class instances into a Dataset or DataFrame.
   *
   * @param session the active SparkSession; closed before returning
   */
  def rddCaseClass2Ds(session: SparkSession): Unit = {
    val context = session.sparkContext
    val rdd: RDD[Person] = context.parallelize(Seq(
      Person(1, "a", 18, 2888.0f),
      Person(2, "b", 28, 3888.0f),
      Person(3, "c", 38, 4888.0f),
      Person(4, "d", 48, 5888.0f),
      Person(5, "e", 58, 6888.0f)
    ))

    // Way 1: call createDataset() by hand with an explicit Encoder.
    // Encoders.product derives the encoder from the case class by reflection:
    // member names become column names and member types become column types.
    // (FIX: type parameter made explicit instead of relying on inference.)
    val ds: Dataset[Person] = session.createDataset(rdd)(Encoders.product[Person])

    val df: Dataset[Row] = ds.toDF()

    // Way 2: import the implicit conversions and call toDS/toDF on the RDD directly.
    // ds1/df1 are built only to demonstrate the implicit route.
    import session.implicits._
    val ds1: Dataset[Person] = rdd.toDS()
    val df1: DataFrame = rdd.toDF()

    ds.printSchema()
    df.printSchema()

    session.close() // FIX: was missing — every other demo closes the session
  }


  /**
   * Convert an RDD of tuples into a DataFrame, naming the columns explicitly.
   *
   * @param session the active SparkSession; closed before returning
   */
  def rddTuple2Dataset(session: SparkSession): Unit = {
    val context = session.sparkContext

    // FIX: idiomatic tuple type syntax instead of Tuple3[Int, String, Int].
    val rdd: RDD[(Int, String, Int)] = context.parallelize(Seq(
      (1, "zs", 18),
      (2, "ls", 18),
      (3, "ww", 18),
      (4, "rr", 18)
    ))

    import session.implicits._
    val df = rdd.toDF("id", "name", "age")
    df.printSchema()
    df.show()

    session.close()
  }



  /**
   * Convert an RDD of JavaBeans into a Dataset.
   *
   * JavaBean types have no implicit encoder in session.implicits, so
   * rdd.toDF is not available; createDataset must be called by hand with
   * Encoders.bean, which reads the bean's getters/setters by reflection.
   *
   * @param session the active SparkSession; closed before returning
   */
  def rddJavaBean2Dataset(session: SparkSession): Unit = {
    val context = session.sparkContext

    val rdd: RDD[Stu] = context.parallelize(Seq(
      new Stu(1, "a", true),
      new Stu(2, "b", true),
      new Stu(3, "c", false),
      new Stu(4, "d", false)
    ))

    val ds: Dataset[Stu] = session.createDataset(rdd)(Encoders.bean(classOf[Stu]))
    ds.printSchema()
    ds.show()

    session.close()
  }


  /**
   * Convert an RDD of plain (non-case) Scala class instances into a Dataset.
   *
   * A plain Scala class can only obtain an encoder the JavaBean way, so the
   * class must expose getter/setter methods for its fields.
   *
   * @param session the active SparkSession; closed before returning
   */
  def rddScalaBean2Dataset(session: SparkSession): Unit = {
    val context = session.sparkContext

    val rdd: RDD[Teacher] = context.parallelize(Seq(
      new Teacher(1, "a"),
      new Teacher(2, "b"),
      new Teacher(3, "c"),
      new Teacher(4, "d")
    ))

    val ds = session.createDataset(rdd)(Encoders.bean(classOf[Teacher]))
    ds.printSchema()
    ds.show()
    session.close()
  }


  /**
   * Convert an RDD of Maps into a Dataset; session.implicits supplies the
   * required Map encoder, so toDS can be called directly on the RDD.
   *
   * @param session the active SparkSession; closed before returning
   */
  def rddMap2Dataset(session: SparkSession): Unit = {
    val context = session.sparkContext
    val rdd: RDD[Map[String, Int]] = context.parallelize(Seq(
      Map("a" -> 1, "b" -> 2),
      Map("c" -> 1, "d" -> 2),
      Map("t" -> 1),
      Map("y" -> 1),
      Map("h" -> 1)
    ))

    import session.implicits._
    val ds = rdd.toDS()
    ds.printSchema()
    ds.show()

    session.close()
  }

}
