package com.bigdata.spark.core.rdd.builder

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{ArrayType, IntegerType, StringType, StructField, StructType}

/**
 * @author Gerry chan
 * @version 1.0
 * Demonstrates round-tripping a DataFrame through Parquet.
 * Parquet is a columnar storage format: files are binary and carry both
 * the data and its metadata (schema), so reading back requires no schema.
 */
object ParDemo {
  def main(args: Array[String]): Unit = {
    // Local Spark session using all available cores.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("parquetDemo")
      .getOrCreate()

    val sc = spark.sparkContext

    // Sample records: (name, favourite color, some numbers).
    val list = List(
      ("张三", "red", Array(3, 4, 5)),
      ("李四", "black", Array(13, 14, 55)),
      ("王五", "orange", Array(23, 64, 45)),
      ("赵六", "blue", Array(33, 34, 35))
    )

    val rdd = sc.parallelize(list)

    // Explicit table schema for the DataFrame (name, color, num: array<int>).
    val schema = StructType(
      Array(
        StructField("name", StringType),
        StructField("color", StringType),
        StructField("num", ArrayType(IntegerType))
      )
    )

    // Destructure the tuples by name instead of positional _1/_2/_3.
    val rowRdd: RDD[Row] = rdd.map { case (name, color, nums) => Row(name, color, nums) }

    // Create the DataFrame from RDD[Row] + schema.
    val df = spark.createDataFrame(rowRdd, schema)

    // Write as Parquet. Use overwrite mode so re-running the demo does not
    // fail with "path already exists" (the default is SaveMode.ErrorIfExists).
    df.write.mode("overwrite").parquet("out/color")

    // Read the Parquet files back and inspect schema and contents.
    val frame = spark.read.parquet("out/color")
    frame.printSchema()
    frame.show()

    spark.stop()
  }
}
