package com.peng.sparktest.sparksql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import scala.beans.BeanProperty

object SparkSql02_APITest01 {

  /**
   * Spark SQL API walkthrough: several (commented-out) ways of turning raw data
   * into a DataFrame — RDD[Row]+StructType, a dynamic schema, a Java-bean RDD,
   * and a Dataset — followed by a runnable word-count implemented both as a SQL
   * query and via the DataFrame API, with the result persisted to parquet and
   * read back.
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("spark").setMaster("local")
    val session: SparkSession = SparkSession
      .builder()
      .config(conf)
      //      .master("local")
      //      .appName("spark")
      //      .enableHiveSupport() // when enabled, table metadata can be read from Hive
      .getOrCreate()

    val context: SparkContext = session.sparkContext
    context.setLogLevel("ERROR")

    // DataFrame = RDD[Row] + StructType, i.e. data + metadata (schema)
    println("=====================1、RDD式数据表的简单案例==============================")

    //    val data: RDD[String] = context.textFile("./test_file/person.txt")

    //    val rows: RDD[Row] = data.map(_.split(" ")).map(arr => Row.apply(arr(0), arr(1).toInt))
    //    val fields = Array(
    //      StructField.apply("name", DataTypes.StringType, nullable = true),
    //      StructField.apply("age", DataTypes.IntegerType, nullable = true)
    //    )
    //    val structType: StructType = StructType.apply(fields)
    //    val dataFrame: DataFrame = session.createDataFrame(rows, structType)
    //    dataFrame.show()
    //    dataFrame.printSchema()
    //    dataFrame.createTempView("person")
    //    session.sql("select * from person") // once registered, the view can be queried like this
    //   session.catalog.listTables().show()

    println("=====================2、动态版RDD版本RDD[ROWS]+StructType数据表==============================")
    //    val data: RDD[String] = context.textFile("./test_file/person.txt")
    //    val schema = Array("name string","age int")
    //
    //    def getValByType(v:(String,Int))={
    //      schema(v._2).split(" ")(1) match {
    //        case "string"=> v._1
    //        case "int"=>v._1.toInt
    //      }
    //    }
    //
    //
    //
    //    val rows: RDD[Row] = data.map(_.split(" "))
    //      .map(_.zipWithIndex)
    //      .map(item => item.map(getValByType(_)))
    //      .map(Row.fromSeq(_))
    //
    //    def  getStructType(s:String)={
    //      s match {
    //        case "string"=>DataTypes.StringType
    //        case "int"=>DataTypes.IntegerType
    //      }
    //    }
    //    val fields: Array[StructField] = schema.map(_.split(" ")).map(item=>StructField.apply(item(0),getStructType(item(1))))
    //    val structType: StructType = StructType.apply(fields)
    // Alternatively, obtain the schema directly from a DDL string:
    //    val structType1: StructType = StructType.fromDDL("name string,age int")

    //    val dataFrame: DataFrame = session.createDataFrame(rows,structType)
    //
    //    dataFrame.show()


    println("=====================3、RDD版本Bean方式数据表==============================")

    //    val data: RDD[String] = context.textFile("./test_file/person.txt")
    //    val person = new Person // captured by the closure, so it must be serializable
    //    val beanRDD: RDD[Person] = data.map(_.split(" ")).map(item => {
    //      person.setName(item(0))
    //      person.setAge(item(1).toInt)
    //      person
    //    })
    //    val dataFrame: DataFrame = session.createDataFrame(beanRDD, classOf[Person])
    //    dataFrame.show()


    println("=====================4、DataSet方式数据表==============================")
    //    import session.implicits._ // usually this single import is all you need
    //    val data: Dataset[String] = session.read.textFile("./test_file/person.txt")
    //    val rows: Dataset[(String, Int)] = data
    //      .map(item => {
    //        val strings: Array[String] = item.split(" ")
    //        (strings(0), strings(1).toInt)
    //      })(Encoders.tuple(Encoders.STRING, Encoders.scalaInt))
    //    // Without the implicits import above, the encoder can be supplied by hand.
    ////
    //    rows.toDF("name", "age") // a Dataset holds only data; attach column names here
    //    rows.show()


    println("=====================5、WordCount==============================")
    import session.implicits._

    // A Dataset holds only the data:
    //    val data: Dataset[String] = List("hello world","hello hello","word me","press").toDS()

    // A DataFrame is data + schema:
    val dataFrame: DataFrame = List("hello world",
      "hello hello",
      "word me",
      "press go").toDF("line")

    // 1) SQL version
    dataFrame.createTempView("lines")
    // count(*) must be given an alias, otherwise writing the result out fails.
    // (fixed typo: alias was misspelled "cout"; renamed to "cnt")
    val res: DataFrame = session.sql(" select word, count(*) as cnt from   (select explode(split(line,' ')) as word from lines) as tt   group by tt.word  ")
    res.show()

    // 2) DataFrame-API version
    val apiRes: DataFrame = dataFrame.selectExpr("explode(split(line,' ')) as word").groupBy("word").count()
    apiRes.count()
    apiRes.show()
    // Of the two, the API version is faster: the SQL version also pays for parsing the query string.

    // Persist the result as parquet files.
    res.write.mode(SaveMode.Append).parquet("./test_file/store")


    // Read the persisted files back.
    val deSer: DataFrame = session.read.parquet("./test_file/store")
    deSer.show()
    deSer.printSchema()
    // (removed a dangling no-op `session.read` expression that was here)
    /*
    File-based sources:
    session.read.parquet()
    session.read.textFile()
    session.read.json()
    session.read.csv()
    Every data source is read in as a DataFrame; writing mirrors reading:
    res.write.parquet()
    res.write.orc()
    res.write.text()
    */

    // Release the SparkContext and associated resources before exiting.
    session.stop()
  }

}

/**
 * Mutable Java-bean wrapper for a (name, age) record.
 *
 * `@BeanProperty` generates the getters/setters Spark's bean reflection needs
 * in `createDataFrame(rdd, classOf[Person])`, and `Serializable` allows an
 * instance to be captured by an RDD closure.
 */
class Person extends Serializable {

  /** Person's name; defaults to the empty string. */
  @BeanProperty
  var name: String = ""

  /** Person's age in years; defaults to 0. */
  @BeanProperty
  var age: Int = 0
}
