package com.ww.spark.sql

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalog.{Database, Table}
import org.apache.spark.sql.types.{DataType, DataTypes, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, RelationalGroupedDataset, Row, SaveMode, SparkSession, catalog}

import scala.beans.BeanProperty

object SQL02_api01 {

  /**
   * JavaBean-style record for one line of data/person.txt.
   *
   * `@BeanProperty` generates getName/setName-style accessors so that Spark's
   * bean-based schema inference (`createDataFrame(rdd, classOf[Person])`) can
   * discover the fields via reflection.
   */
  class Person{
    @BeanProperty
    var name:String = ""
    @BeanProperty
    var age:Int = 0
  }

  /**
   * Walks through several ways of building DataFrames/Datasets with Spark SQL:
   *  1. RDD[Row] plus a hand-built StructType schema
   *  2. a schema driven by a string field-configuration array
   *  3. schema inference from a JavaBean class
   *  4. a typed Dataset converted with toDF
   *  5. word count via SQL and via the DataFrame API, plus a parquet round-trip
   *
   * Expects input files under data/ and writes parquet output to out/ooxx.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("sql02")
    val session = SparkSession.builder().config(conf).getOrCreate()
    val context = session.sparkContext

    // --- 1) RDD[Row] + explicit StructType schema ---------------------------
    val rdd: RDD[String] = context.textFile("data/person.txt")
    val arr: RDD[Array[String]] = rdd.map(line => line.split(" "))
    val row: RDD[Row] = arr.map(arrays =>
      Row(arrays(0), arrays(1).toInt)
    )
    val fields: Array[StructField] = Array(
      StructField("name", DataTypes.StringType, nullable = true),
      StructField("age", DataTypes.IntegerType, nullable = true)
    )
    val schema: StructType = StructType(fields)
    val frame: DataFrame = session.createDataFrame(row, schema)
    frame.show()
    frame.printSchema()
    // createOrReplaceTempView is idempotent; plain createTempView throws
    // TempTableAlreadyExistsException when the view name is already registered.
    frame.createOrReplaceTempView("person")
    session.sql("select * from person").show()

    println("\n 练习自动加载数据 =====================================================================================================================")

    // Maps a type name from the field configuration to a Spark SQL DataType.
    // The default case gives a clear error instead of an opaque MatchError.
    def getDataType(v: String): DataType =
      v match {
        case "string" => DataTypes.StringType
        case "int"    => DataTypes.IntegerType
        case other    => throw new IllegalArgumentException(s"unsupported field type: $other")
      }

    // Field configuration: one "<name> <type>" entry per column, positional.
    val fieldsConf: Array[String] = Array(
      "name string",
      "age int",
      "sex string"
    )

    // Converts one (rawToken, columnIndex) pair to the runtime value whose type
    // is declared in fieldsConf for that column.
    def toDataType(vi: (String, Int)): Any =
      fieldsConf(vi._2).split(" ")(1) match {
        case "string" => vi._1
        case "int"    => vi._1.toInt
        case other    => throw new IllegalArgumentException(s"unsupported field type: $other")
      }

    // --- 2) schema loaded from the configuration array ----------------------
    val line: RDD[String] = context.textFile("data/person1.txt")
    // Pair every token with its column index, e.g. ((zhangsan,0), (18,1), (男,2))
    val zip: RDD[Array[(String, Int)]] = line.map(_.split(" ")).map(_.zipWithIndex)
    // Convert each token to its configured runtime type, e.g. (zhangsan, 18, 男)
    val transZip: RDD[Array[Any]] = zip.map(z => z.map(toDataType))
    val rows: RDD[Row] = transZip.map(Row.fromSeq(_))

    // Build the StructType from the same configuration that drove the parsing.
    val schema1: Array[StructField] = fieldsConf
      .map(_.split(" "))
      .map(arr => StructField(arr(0), getDataType(arr(1)), nullable = true))

    val frame1: DataFrame = session.createDataFrame(rows, StructType(schema1))
    frame1.show()
    frame1.printSchema()

    println("\n bean =====================================================================================================================")

    // --- 3) schema inferred from a JavaBean class ---------------------------
    val bean: RDD[Person] = rdd.map(_.split(" ")).map(arr => {
      val person = new Person()
      person.name = arr(0)
      person.age = arr(1).toInt
      person
    })

    val frame2: DataFrame = session.createDataFrame(bean, classOf[Person])
    frame2.show()

    println("\n dataset =====================================================================================================================")

    // --- 4) typed Dataset converted to a DataFrame --------------------------
    import session.implicits._
    val ds001: Dataset[String] = session.read.textFile("data/person.txt")
    val person: Dataset[(String, String)] = ds001.map(l => {
      val strings: Array[String] = l.split(" ")
      (strings(0), strings(1))
    })
    val frame3: DataFrame = person.toDF("name", "age")
    frame3.show()

    println("\n 单词统计 =====================================================================================================================")

    // --- 5a) word count with SQL --------------------------------------------
    val dataDF: DataFrame = List(
      "hello world",
      "hello world",
      "hello msb",
      "hello world",
      "hello world",
      "hello spark",
      "hello world",
      "hello spark"
    ).toDF("line")
    dataDF.createOrReplaceTempView("line")
    val frame4: DataFrame = session.sql("select word,count(*) from (select explode(split(line,' ')) as word from line) group by word")
    frame4.show()

    println("\n 单词统计 面向api=====================================================================================================================")

    // --- 5b) same word count expressed with the DataFrame API ---------------
    val frame5: DataFrame = dataDF.selectExpr("explode(split(line,' ')) as word")
    val dataset: RelationalGroupedDataset = frame5.groupBy("word")
    val frame6: DataFrame = dataset.count()
    frame6.show()

    println("\n 单词统计 读写parquet格式的文件=====================================================================================================================")

    // Write as parquet. NOTE: SaveMode.Append accumulates rows across runs of
    // this program; use SaveMode.Overwrite if a single snapshot is wanted.
    frame6.write.mode(SaveMode.Append).parquet("out/ooxx")
    // Read the parquet data back and display it.
    val frame7: DataFrame = session.read.parquet("out/ooxx")
    frame7.show()

    // Release the SparkContext and its resources.
    session.stop()
  }
}
