package com.shujia.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object Demo06RDDToDF {

  /**
   * Demo: converting an RDD to a DataFrame (manually via tuples, and via a
   * case class) and converting a DataFrame back to an RDD[Row].
   *
   * Expects `Spark/data/students.txt` with comma-separated lines:
   * id,name,age,gender,clazz
   */
  def main(args: Array[String]): Unit = {
    // Build the SparkSession (local mode, 2 shuffle partitions for the demo)
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo06RDDToDF")
      .master("local")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    try {
      val sc: SparkContext = spark.sparkContext

      // Build an RDD from the text file
      val stuRDD: RDD[String] = sc.textFile("Spark/data/students.txt")

      // Import implicit conversions (needed for .toDF on RDDs)
      import spark.implicits._

      // Pre-split each line; drop malformed lines so indexing/toInt cannot throw
      val fieldsRDD: RDD[Array[String]] = stuRDD
        .map(_.split(","))
        .filter(arr => arr.length >= 5 && arr(2).forall(_.isDigit))

      // Manual conversion: map each record to a tuple (one element per column),
      // then name the columns explicitly in toDF
      val stuDF1: DataFrame = fieldsRDD
        .map(arr => (arr(0), arr(1), arr(2).toInt, arr(3), arr(4)))
        .toDF("id", "name", "age", "gender", "clazz")

      stuDF1.show()

      // Conversion via a case class: column names default to the case-class
      // field names, so toDF needs no arguments
      val stuDF2: DataFrame = fieldsRDD
        .map(arr => StuRDDToDF(arr(0), arr(1), arr(2).toInt, arr(3), arr(4)))
        .toDF()

      stuDF2.show()

      // DataFrame back to RDD: each record becomes a Row object.
      // NOTE: foreach(println) prints on executors; in local mode that is the
      // driver console, which is fine for a demo.
      val rdd: RDD[Row] = stuDF1.rdd
      rdd.map(row => (row.getAs[String]("clazz"), 1))
        .reduceByKey(_ + _)
        .foreach(println)
    } finally {
      // Always release the session; the original leaked it
      spark.stop()
    }
  }

}

/**
 * Student record used for the RDD-to-DataFrame conversion; field names become
 * the DataFrame column names. `final` because case classes must not be extended.
 */
final case class StuRDDToDF(id: String, name: String, age: Int, gender: String, clazz: String)
