package com.shujia.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo3CreateDataFrame {

  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("sql")
      .master("local")
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    // Implicit conversions required for rdd.toDF() below.
    import spark.implicits._

    try {
      /**
        * Ways to create a DataFrame:
        * 1. read a JSON file
        * 2. read a CSV-formatted file
        * 3. build one from an RDD
        * 4. load a table over JDBC
        */
      val df1: DataFrame = spark.read.json("spark/data/students.json")

      val df2: DataFrame = spark
        .read
        .option("sep", ",") // column separator
        //.option("inferSchema", "true") // infer column types automatically
        .schema("id STRING , name STRING ,age LONG , gender STRING , clazz STRING") // column names and types, in order
        .csv("spark/data/students.txt")

      df2.printSchema()
      df2.show()

      // Build an RDD of the custom Student type; line parsing is factored
      // into parseStudent so it is not duplicated below.
      val rdd: RDD[Student] = spark
        .sparkContext
        .textFile("spark/data/students.txt")
        .map(parseStudent)

      // Convert the case-class RDD straight to a DataFrame (needs the
      // spark.implicits._ import above; column names come from Student's fields).
      val stuDF: DataFrame = rdd.toDF()

      // Same data as a tuple RDD: toDF needs explicit column names here.
      val rdd1: RDD[(String, String, Long, String, String)] = spark
        .sparkContext
        .textFile("spark/data/students.txt")
        .map { line =>
          val s = parseStudent(line)
          (s.id, s.name, s.age, s.gender, s.clazz)
        }

      val df3: DataFrame = rdd1.toDF("id", "name", "age", "gender", "clazz")

      df3.show()

      // Create a DataFrame over JDBC.
      // NOTE(review): credentials are hard-coded demo values; in real code they
      // should come from configuration, not source.
      val jdbcDF: DataFrame = spark
        .read
        .format("jdbc")
        .option("url", "jdbc:mysql://master:3306")
        .option("dbtable", "test.student")
        .option("user", "root")
        .option("password", "123456")
        .load()

      jdbcDF.show(100)
    } finally {
      // Always release the session's resources — the original never stopped it.
      spark.stop()
    }
  }

  /**
    * Parses one comma-separated line of students.txt into a Student.
    * Assumes exactly 5 fields in order (id, name, age, gender, clazz);
    * throws NumberFormatException if the age field is not numeric.
    */
  private def parseStudent(line: String): Student = {
    val split: Array[String] = line.split(",")
    Student(split(0), split(1), split(2).toLong, split(3), split(4))
  }

  /** Immutable record for one student row. */
  case class Student(id: String, name: String, age: Long, gender: String, clazz: String)

}
