package chapter10

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DataTypes, StructType}
import org.apache.spark.sql.{Row, SparkSession}

/**
 * author: 余辉
 * blog: https://blog.csdn.net/silentwolfyh
 * descriptions: 1.1.1.7从RDD[set/seq/map]中创建DataFrame
 * date: 2024 - 09 - 02 2:24 下午
 */

object RDDToDF07 {

  /**
   * Demonstrates creating DataFrames from RDDs whose elements are Scala
   * collections (Seq / Set / Map), and querying the resulting array/map
   * columns with SQL expressions.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder().appName("").master("local[*]").getOrCreate()
    // Brings the toDF() implicit conversions for RDDs into scope.
    import spark.implicits._

    // --- Seq[Int] elements: each row holds one sequence in a column named "value" ---
    val seqRdd: RDD[Seq[Int]] =
      spark.sparkContext.parallelize(List(Seq(1, 2, 3, 4), Seq(11, 22, 33, 44)))
    val seqDf = seqRdd.toDF()
    seqDf.printSchema()
    seqDf.show()

    // The collection column supports indexing and size() in SQL expressions.
    seqDf.selectExpr("value[0]", "size(value)").show()

    // --- Set[String] elements: encoded the same way, here with an explicit column name ---
    val setRdd: RDD[Set[String]] =
      spark.sparkContext.parallelize(List(Set("a", "b"), Set("c", "d", "e")))
    val setDf = setRdd.toDF("members")
    setDf.printSchema()
    setDf.show()

    // --- Map[String, String] elements: encoded as a map column named "jiaren" ---
    val mapRdd: RDD[Map[String, String]] = spark.sparkContext.parallelize(List(
      Map("father" -> "mayun", "mother" -> "tangyan"),
      Map("father" -> "huateng", "mother" -> "yifei", "brother" -> "sicong")
    ))
    val mapDf = mapRdd.toDF("jiaren")
    mapDf.printSchema()
    mapDf.show()

    // Map columns support key lookup, size(), map_keys() and map_values().
    mapDf.selectExpr("jiaren['mother']", "size(jiaren)", "map_keys(jiaren)", "map_values(jiaren)")
      .show(10, false)

    spark.close()
  }

}