package com.shujia.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object Demo5RDDToDF {
  /**
   * Demonstrates converting between RDD and DataFrame.
   *
   * Reads a student CSV into an RDD, converts it to a DataFrame,
   * runs a SQL aggregation (students per class), then converts the
   * result back to an RDD of Rows and prints it.
   */
  def main(args: Array[String]): Unit = {
    /**
     * In newer Spark versions, Spark SQL code uses the unified entry
     * point: SparkSession.
     */
    val sparkSession: SparkSession = SparkSession.builder()
      .master("local")
      .appName("RDD和DF互相转换演示")
      .config("spark.sql.shuffle.partitions", 1) // default is 200 shuffle partitions
      .getOrCreate()

    // Implicit conversions required for RDD <-> DataFrame (toDF, $"col", ...)
    import sparkSession.implicits._

    /**
     * Read the raw file as an RDD via the SparkContext
     * (SparkSession wraps a SparkContext).
     */
    val sc: SparkContext = sparkSession.sparkContext
    val linesRDD: RDD[String] = sc.textFile("spark/data/students.csv")

    // FIX: the original `map { case Array(...) => ... }` used a non-exhaustive
    // pattern and threw a MatchError on any line without exactly 5 fields.
    // `collect` with a partial function skips malformed lines instead.
    val studentsRDD: RDD[(String, String, Int, String, String)] = linesRDD
      .map(_.split(","))
      .collect {
        // example line: 1500100001,施笑槐,22,女,文科六班
        case Array(id, name, age, gender, clazz) =>
          (id, name, age.toInt, gender, clazz)
      }

    /**
     * RDD -> DataFrame: name the tuple fields as columns and register
     * the result as a temporary view so it can be queried with SQL.
     */
    val studentsDF: DataFrame = studentsRDD.toDF("id", "name", "age", "gender", "clazz")
    studentsDF.createOrReplaceTempView("students")

    // Count the number of students in each class.
    val resultDF: DataFrame = sparkSession.sql(
      """
        |select
        |clazz,
        |count(1) as number
        |from
        |students
        |group by clazz
        |""".stripMargin)

    /**
     * DataFrame -> RDD.
     * Inside a Row, integer columns come back as Long and
     * fractional columns as Double, hence `number: Long` below.
     */
    val studentsRDD2: RDD[Row] = resultDF.rdd

    studentsRDD2.map {
      case Row(clazz: String, number: Long) =>
        (clazz, number)
    }.foreach(println)

    // FIX: the original never released Spark resources on completion.
    sparkSession.stop()
  }
}
