package day01_create

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates converting between RDDs and DataFrames with Spark SQL.
 *
 * A SparkSession internally wraps a SparkContext, so we obtain the
 * SparkContext from the session rather than constructing it separately.
 *
 * @author wsl
 * @version 2022-10-12
 */
object RddAndDF {
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf().setAppName("spark sql").setMaster("local[*]")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()

//    val df: DataFrame = spark.read.json("sparksql/input/user.json")
//    df.show()

    // Ensure the SparkSession is released even if the demo body throws.
    try {
      // Reuse the SparkContext embedded in the SparkSession.
      val sc: SparkContext = spark.sparkContext

      // Parse "name,age" lines into a pair RDD of (name, age).
      val rdd: RDD[(String, Long)] = sc.textFile("sparksql/input/user.txt")
        .map { line =>
          val data: Array[String] = line.split(",")
          (data(0), data(1).toLong)
        }

      // Implicit conversions (toDF etc.) required for RDD <-> DataFrame/Dataset
      // conversion; `spark` here is the SparkSession value defined above.
      import spark.implicits._

      // RDD -> DataFrame
      // Option 1: plain tuple RDD. Tuples carry no field names, so the
      // resulting DataFrame gets default column names unless we supply them.
      val df: DataFrame = rdd.toDF()
      df.show()
      val df1: DataFrame = rdd.toDF("name", "age") // explicit column names

      // Option 2: map to a case class first — its field names become the
      // column names automatically.
      // NOTE: this is a DataFrame, not an RDD (the former name `userRdd`
      // was misleading).
      val userDf: DataFrame = rdd.map {
        case (name, age) => User(name, age)
      }.toDF()

      // DataFrame -> RDD: DataFrame.rdd yields RDD[Row]; extract fields by
      // position and type. foreach(println) executes on the executors, so
      // with local[*] the output order is nondeterministic.
      df1.rdd
        .map(row => User(row.getString(0), row.getLong(1)))
        .foreach(println)

      userDf.rdd
        .map(row => User(row.getString(0), row.getLong(1)))
        .foreach(println)
    } finally {
      spark.stop()
    }
  }
}
