package com.shujia.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
 * Demonstrates converting between an RDD and a DataFrame.
 *
 * Reads `spark/data/students.csv`, builds an RDD of tuples, converts it to a
 * DataFrame via `toDF`, then converts the DataFrame back to an `RDD[Row]`
 * and destructures each row with pattern matching.
 */
object Demo7RDDAndDF {
  def main(args: Array[String]): Unit = {

    // 1. Create the Spark SQL session (local mode for this demo).
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("sql")
      .config("spark.sql.shuffle.partitions", 1) // cluster default is 200; 1 keeps the local demo fast
      .getOrCreate()

    // Implicit conversions: enable extension methods such as RDD.toDF.
    import spark.implicits._
    import org.apache.spark.sql.functions._

    try {
      // Get the underlying SparkContext for the RDD API.
      val sc: SparkContext = spark.sparkContext

      // Read the raw text file as an RDD of lines.
      val linesRDD: RDD[String] = sc.textFile("spark/data/students.csv")

      // Parse each CSV line into a 5-tuple.
      // NOTE(review): assumes every line has >= 5 comma-separated fields and
      // that field 2 is a valid integer (age) — a malformed line or a header
      // row would throw here. TODO: confirm the input file is always clean.
      val studentsRDD: RDD[(String, String, Int, String, String)] = linesRDD
        .map(line => {
          val split: Array[String] = line.split(",")
          (split(0), split(1), split(2).toInt, split(3), split(4))
        })

      /**
       * RDD -> DataFrame
       */

      // toDF assigns the given column names to the tuple fields in order.
      val studentDF: DataFrame = studentsRDD.toDF("id", "name", "age", "sex", "clazz")

      studentDF.show()

      /**
       * DataFrame -> RDD
       */
      val stuRDD: RDD[Row] = studentDF.rdd

      // Extract the typed values from each Row via pattern matching.
      // The pattern is total here because the schema was fixed by toDF above;
      // a schema change would make this match fail at runtime (MatchError).
      stuRDD.map {
        case Row(id: String, name: String, age: Int, sex: String, clazz: String) =>
          (id, name, age, sex, clazz)
      }.foreach(println)
    } finally {
      // Bug fix: the session was never stopped, leaking the SparkContext.
      spark.stop()
    }
  }

}
