package com.shujia.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Code02CSVSource {

  /**
   * Demonstrates Spark SQL file-based data sources:
   *   - reading CSV with an explicit schema and custom separator
   *   - inspecting data with `show` (numRows / truncate / vertical)
   *   - writing data back out as CSV and JSON with a save mode
   *
   * Input files are expected under `spark_code/data/`.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("csv")
      .getOrCreate()

    // Ensure the session (and its SparkContext) is released even if a job fails.
    try {
      val dataFrame: DataFrame = spark
        .read
        .format("csv")
        .option("sep", ",")
        // Sample row: 1500100001,施笑槐,22,女,文科六班
        .schema("id String,name String,age int,gender String,clazz String")
        .load("spark_code/data/students.txt")

      //    val frame: DataFrame = spark.read.schema("id String,courseID String,score int").csv("spark_code/data/score.txt")
      //    frame.show()

      /**
       * show():
       *    1. Displays the DataFrame's rows; 20 rows by default.
       *    2. `truncate` controls how many characters of each column are displayed.
       *    3. `vertical = true` prints every column of each row on its own line.
       */

      // numRows: how many rows to display.
      dataFrame.show(100)

      // Only referenced by the commented-out `show` examples below; kept for the demo.
      val wordDataFrame: DataFrame = spark
        .read
        .format("csv")
        .option("sep", " ")
        .load("spark_code/data/more_words.txt")

      //    wordDataFrame.show(truncate = false)
      //    wordDataFrame.show(20,truncate = 0, vertical = true)
      //    dataFrame.show(20,truncate = 0, vertical = true)

      // Saving data
      //    dataFrame.write.csv("spark_code/data/stu_csv")
      //
      //    dataFrame
      //      .write.format("csv")
      //      .option("sep","\t")
      //      .mode(saveMode = SaveMode.Overwrite)
      //      .save("spark_code/data/stu_csv2")

      //    dataFrame
      //      .write
      //      .json("spark_code/data/stu_json")

      // Overwrite mode replaces the target directory if it already exists.
      dataFrame
        .write
        .format("json")
        .mode(saveMode = SaveMode.Overwrite)
        .save("spark_code/data/stu_json2")

      val wordDataFrame2: DataFrame = spark
        .read
        .format("csv")
        .option("sep", " ")
        .load("spark_code/data/more_words.txt")

      wordDataFrame2.show()
    } finally {
      // Missing in the original: stop the session to release local executor resources.
      spark.stop()
    }
  }
}
