package com.shujia.sql

import java.util.Properties

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, SaveMode}

object Demo2CreateDF {

  /**
    * Parses one CSV line of students.txt ("id,name,age,gender,clazz")
    * into a [[Student]].
    *
    * NOTE(review): assumes every line has at least 5 comma-separated fields
    * and that field 2 is a valid integer — malformed input will throw, same
    * as the original inline parsing did.
    */
  private def parseStudent(line: String): Student = {
    val fields = line.split(",")
    Student(fields(0), fields(1), fields(2).toInt, fields(3), fields(4))
  }

  /**
    * Demonstrates the main ways to create a DataFrame:
    *   1. from an RDD of a custom case class
    *   2. from an RDD of tuples (column names supplied via toDF)
    *   3. via JDBC (MySQL)
    *   4. by reading a parquet file
    * Also shows writing a DataFrame out as parquet.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local[4]").setAppName("Demo1sql")

    // Set the default Spark SQL shuffle parallelism.
    conf.set("spark.sql.shuffle.partitions", "2")

    val sc = new SparkContext(conf)

    // Entry point for Spark SQL (Spark 1.x style API).
    val sqlContext = new SQLContext(sc)

    val rdd = sc.textFile("spark/data/students.txt")

    /**
      * 1. RDD -> DF by mapping each line to a custom case class instance;
      *    toDF derives the schema from the case class fields.
      */
    val studentRDD: RDD[Student] = rdd.map(parseStudent)

    import sqlContext.implicits._

    val studentDF = studentRDD.toDF()
    studentDF.show()

    /**
      * 2. RDD -> DF via tuples; column names must be given explicitly.
      *    Reuses the already-parsed studentRDD instead of re-parsing the file.
      */
    val studentDF1 = studentRDD
      .map(s => (s.id, s.name, s.age, s.gender, s.clazz))
      .toDF("id", "name", "age", "gender", "clazz") // specify column names

    // Register the DF as a temporary table so it can be queried with SQL.
    studentDF1.registerTempTable("student")

    // Count students per class.
    sqlContext.sql("select clazz,count(1) from student group by clazz").show()

    /**
      * 3. Create a DF by connecting to MySQL over JDBC.
      *    NOTE(review): credentials are hard-coded — consider externalizing.
      */
    val properties = new Properties()

    properties.setProperty("driver", "com.mysql.jdbc.Driver")
    properties.setProperty("user", "root")
    properties.setProperty("password", "123456")

    val mysqlDF = sqlContext.read.jdbc("jdbc:mysql://node1:3306/student", "student", properties)

    mysqlDF.show()

    /**
      * Save the DF to HDFS as parquet.
      */
    studentDF1
      .write
      .mode(SaveMode.Overwrite) // overwrite if the path already exists
      .parquet("spark/data/parquet")

    /**
      * 4. Create a DF by reading the parquet file back.
      */
    val parDF = sqlContext.read.parquet("spark/data/parquet")

    parDF.show()

  }
}

/**
  * Immutable record for one student row of students.txt.
  *
  * @param id     student id (kept as String, matching the source file)
  * @param name   student name
  * @param age    age in years
  * @param gender gender code
  * @param clazz  class name the student belongs to
  */
final case class Student(id: String, name: String, age: Int, gender: String, clazz: String)