package com.shujia.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
  * Demo: converting between RDDs and DataFrames in Spark SQL.
  *
  * Reads a CSV-like student file, builds a tuple RDD, converts it to a
  * DataFrame (column names supplied at conversion time), then converts the
  * DataFrame back to an RDD[Row] and parses each Row two ways:
  * by column name (`getAs`) and by pattern matching on `Row(...)`.
  */
object Demo3RDDToDF {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("api")
      .getOrCreate()

    // Import implicit conversions (enables .toDF on RDDs of tuples)
    import spark.implicits._
    // Import all built-in SQL functions
    import org.apache.spark.sql.functions._

    // Get the underlying SparkContext for RDD operations
    val sc: SparkContext = spark.sparkContext

    // Expected record format per line: id,name,age,gender,clazz
    val studentRDD: RDD[String] = sc.textFile("data/students.txt")

    val stuRDD: RDD[(String, String, Int, String, String)] = studentRDD.map(stu => {
      val split: Array[String] = stu.split(",")
      (split(0), split(1), split(2).toInt, split(3), split(4))
    })

    /**
      * RDD -> DF: for an RDD of tuples, column names must be supplied
      * explicitly via toDF. BUG FIX: column was previously misspelled
      * "clszz", which made the later getAs[String]("clazz") lookup fail
      * with "Field \"clazz\" does not exist" once an action ran on tRDD.
      */
    val df: DataFrame = stuRDD.toDF("id", "name", "age", "gender", "clazz")

    //df.show()

    /**
      * DF -> RDD: the element type is Row; a Row represents one record and
      * column values can be retrieved by column name.
      */
    val rdd: RDD[Row] = df.rdd

    /**
      * Parse each Row field-by-field using getAs with the column name.
      */
    val tRDD: RDD[(String, String, Int, String, String)] = rdd.map(row => {
      val id: String = row.getAs[String]("id")
      val name: String = row.getAs[String]("name")
      val age: Int = row.getAs[Int]("age")
      val gender: String = row.getAs[String]("gender")
      val clazz: String = row.getAs[String]("clazz")
      (id, name, age, gender, clazz)
    })

    /**
      * Parse each Row with pattern matching; field order must match the
      * DataFrame's column order (id, name, age, gender, clazz).
      */
    val trdd2: RDD[(String, String, Int, String, String)] = rdd.map {
      case Row(id: String, name: String, age: Int, gender: String, clazz: String) =>
        (id, name, age, gender, clazz)
    }

    trdd2.foreach(println)

    // Release cluster resources when done
    spark.stop()
  }

}
