package cn.doitedu.df_rdd

import cn.doitedu.util.SparkUtil
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{DataTypes, StructType}

/**
 * @Date 22.4.12
 * @Created by HANGGE
 * @Description Demonstrates converting RDDs of Scala collections (Seq, Map, Set)
 *              into DataFrames via `session.implicits`, and querying the resulting
 *              array/map-typed columns with SQL expressions.
 */
object C07_RDD_2_DF_Colection {

  /**
   * Entry point: builds DataFrames from RDDs of Scala collections and shows
   * how each collection type is encoded as a Spark SQL column type.
   *
   * - Seq[Int]          -> array<int> column
   * - Map[String, Int]  -> map<string,int> column
   * - Set[Int]          -> array<int> column (SQL has no dedicated set type)
   */
  def main(args: Array[String]): Unit = {
    val session = SparkUtil.getSession
    // Underlying SparkContext, needed to create RDDs directly
    val sc = session.sparkContext

    try {
      // 1. Seq collection: each Seq becomes one row holding an array-typed value
      val seq1 = Seq(1, 2, 3, 4, 5)
      val seq2 = Seq(6, 7, 8, 9, 10)
      val ls = List(seq1, seq2)

      val rdd: RDD[Seq[Int]] = sc.parallelize(ls)

      // Brings in the encoders that make .toDF available on the RDD
      import session.implicits._
      val df = rdd.toDF("number")

      df.show()

      /**
       * root
       * |-- number: array (nullable = true)
       * |    |-- element: integer (containsNull = false)
       */
      df.printSchema()

      // Operations on the array column: element access and row-per-element expansion
      df.selectExpr("number[0]", "size(number)", "explode(number) as n").show()

      // 2. Map collection: each Map becomes one row holding a map-typed value
      val mp1 = Map("zss" -> 23, "lss" -> 33)
      val mp2 = Map("ww" -> 23, "qq" -> 33)

      val mps = List(mp1, mp2)

      val df2 = sc.parallelize(mps).toDF("mp")

      /**
       * root
       * |-- mp: map (nullable = true)
       * |    |-- key: string
       * |    |-- value: integer (valueContainsNull = false)
       */
      df2.printSchema()
      df2.show()
      // Operations on the map column: key lookup plus the map_keys/map_values builtins
      df2.selectExpr("mp['zss']", "map_keys(mp)", "map_values(mp)").show()

      // 3. Set collection: encoded as an array column (element order is not guaranteed)
      val st1 = Set(1, 2, 3)
      val st2 = Set(4, 5, 6)
      val df3 = sc.parallelize(List(st1, st2)).toDF("st")
      df3.printSchema()
      df3.show()
    } finally {
      // Always release the SparkSession so background threads stop and the JVM exits cleanly
      session.close()
    }
  }

}
