

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

object DataFrameDemo01 {

  /**
   * Demonstrates several ways to obtain a DataFrame:
   *   1. Reading files directly (text / csv / json) via `spark.read`.
   *   2. Converting an RDD with `toDF()` (single untyped string column).
   *   3. Converting an RDD of tuples with `toDF(colNames*)` so each column
   *      gets a name and an inferred type.
   *
   * Expects the sample data under `data/` relative to the working directory.
   */
  def main(args: Array[String]): Unit = {
    // Create (or reuse) a local SparkSession with two worker threads.
    val spark = SparkSession.builder()
      .appName("sparksql-demo")
      .master("local[2]")
      .getOrCreate()

    // Ensure the session (and its SparkContext) is released even if a read fails.
    try {
      // --- 1) DataFrames read straight from files -------------------------
      // Plain text: a single string column named "value".
      val df1: DataFrame = spark.read.text("data/person.txt")
      df1.printSchema() // show each column's name and data type
      df1.show()
      println("-" * 20)

      // CSV: without a header/schema, columns default to _c0, _c1, ... as strings.
      val df2: DataFrame = spark.read.csv("data/csv")
      df2.printSchema()
      df2.show()
      println("-" * 20)

      // JSON: column names and types are inferred from the documents.
      val df3: DataFrame = spark.read.json("data/json")
      df3.printSchema()
      df3.show()

      // --- 2) RDD -> DataFrame with toDF() --------------------------------
      // Obtain the SparkContext from the SparkSession.
      val sc: SparkContext = spark.sparkContext
      sc.setLogLevel("WARN")
      val personRdd: RDD[String] = sc.textFile("data/person.txt")

      // toDF() requires the implicit conversions from the session's implicits.
      import spark.implicits._
      val personDf: DataFrame = personRdd.toDF()
      personDf.printSchema()
      personDf.show()

      // --- 3) RDD of tuples -> DataFrame with named columns ---------------
      // Reuse personRdd (no need to re-read the same file) and split each
      // whitespace-separated line into typed fields: (id, name, age).
      val personRdd2: RDD[(Int, String, Int)] = personRdd.map(line => {
        val arr: Array[String] = line.split("\\s+")
        (arr(0).toInt, arr(1), arr(2).toInt)
      })
      // Convert to a DataFrame, assigning an explicit name to each column.
      val personDf1: DataFrame = personRdd2.toDF("id", "name", "age")
      personDf1.printSchema()
      personDf1.show()
    } finally {
      // Release the Spark resources held by this session.
      spark.stop()
    }
  }

}
