package com.shujia.spark.sql

import java.util.Properties

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

object Demo3CreateDF {

  def main(args: Array[String]): Unit = {

    /**
      * Ways to create a DataFrame:
      * 1. From an RDD (tuples or case classes)
      * 2. By reading files (JSON, Parquet, ...)
      * 3. Via JDBC
      */

    val conf = new SparkConf().setMaster("local").setAppName("app")

    // Default number of partitions Spark SQL uses after a shuffle
    // (only takes effect for operations that produce a shuffle).
    conf.set("spark.sql.shuffle.partitions", "1")

    val sc = new SparkContext(conf)

    // SQLContext is the Spark SQL entry point (pre-2.0 API).
    val sqlContext = new SQLContext(sc)

    val studentRDD = sc.textFile("spark/data/students.txt")

    /**
      * RDD -> DataFrame: tuple approach.
      * Column names are supplied explicitly via toDF.
      */
    val tupleRDD = studentRDD.map(line => {
      val split = line.split(",")
      val id = split(0)
      val name = split(1)
      val age = split(2).toInt
      val gender = split(3)
      val clazz = split(4)
      (id, name, age, gender, clazz)
    })

    // Implicit conversions that enable RDD -> DataFrame (.toDF).
    import sqlContext.implicits._

    // Convert the tuple RDD into a DataFrame.
    // toDF is only available after importing the implicits above.
    // NOTE: column was previously misspelled "anme"; fixed to "name"
    // so it matches the Student case class and name-based lookups.
    val studentDF = tupleRDD.toDF("id", "name", "age", "gender", "clazz")

    studentDF.show()

    /**
      * RDD -> DataFrame: case-class approach.
      * Column names are derived from the Student field names.
      */
    val beanRDD = studentRDD.map(line => {
      val split = line.split(",")
      val id = split(0)
      val name = split(1)
      val age = split(2).toInt
      val gender = split(3)
      val clazz = split(4)

      Student(id, name, age, gender, clazz)
    })

    // Convert the case-class RDD into a DataFrame.
    val beanDF = beanRDD.toDF()
    beanDF.show()


    /**
      * Create a DataFrame by reading a JSON file.
      */
    sqlContext.read.json("spark/data/students.json").show()

    /**
      * Create a DataFrame by reading a Parquet file.
      */
    sqlContext.read.parquet("spark/data/parquet").show()


    /**
      * Create a DataFrame by reading a MySQL table over JDBC.
      */
    val properties = new Properties()

    properties.setProperty("driver", "com.mysql.jdbc.Driver")
    properties.setProperty("user", "test")
    properties.setProperty("password", "123456")

    val df = sqlContext.read.jdbc("jdbc:mysql://node1:3306/test", "student", properties)

    df.show()

    /**
      * DataFrame -> RDD.
      *
      * Each Row represents one record; fields can be fetched by column name.
      */
    val rdd = df.map(row => {
      val id = row.getAs[String]("id")
      val name = row.getAs[String]("name")
      val age = row.getAs[Int]("age")
      (id, name, age)
    })

    rdd.foreach(println)

  }

  /** Schema for a student record; field names become DataFrame column names. */
  case class Student(id: String, name: String, age: Int, gender: String, clazz: String)

}
