package com.itcast.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._

/**
 * DESC:使用sparksql实现简单的wordcount
 */
/**
 * Demonstrates Spark SQL column selection (DSL style) and SQL-style queries
 * over temporary and global temporary views, using the iris dataset.
 *
 * NOTE(review): the object is named "SparkSqlWordCount" but nothing here is a
 * word count — it reads iris.csv and demos select/views. Consider renaming
 * (kept as-is to avoid breaking external references).
 */
object SparkSqlWordCount {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("SparkSqlWordCount").setMaster("local[*]")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")
    import spark.implicits._

    // 1. Read the CSV: first line is a header row, and "inferSchema" (canonical
    //    camelCase option name) asks Spark to derive column types from the data.
    val data: DataFrame = spark.read
      .format("csv")
      .option("header", true)
      .option("inferSchema", true)
      .load("./datasets/input/iris.csv")

    // 2. Inspect the inferred schema.
    data.printSchema()
    /* root
     |-- sepal_length: double (nullable = true)
     |-- sepal_width: double (nullable = true)
     |-- petal_length: double (nullable = true)
     |-- petal_width: double (nullable = true)
     |-- class: string (nullable = true)*/

    // 3. Preview a few rows (kept disabled, as in the original).
    //data.show(3,false)

    // 4. Five equivalent DSL-style ways to select the same column.
    data.select("sepal_length").show()
    data.select($"sepal_length").show()
    data.select(col("sepal_length")).show()
    data.select(column("sepal_length")).show()
    data.select('sepal_length).show() // Symbol syntax is converted to col("sepal_length")
    data.select('sepal_length.cast("string")).show()

    // 5. SQL-style queries. spark.sql(...) builds a lazy DataFrame — without an
    //    action such as show(), the query never actually executes, so each demo
    //    query triggers show() here.
    data.createOrReplaceTempView("table1")
    spark.sql("select * from table1").show()

    // A global temp view lives in the reserved "global_temp" database and is
    // visible across sessions of the same application.
    data.createOrReplaceGlobalTempView("table2")
    spark.sql("select * from  global_temp.table2").show()
    spark.newSession().sql("select * from  global_temp.table2").show()

    // A plain temp view is session-scoped: querying it from a new session fails
    // with "Table or view not found: table1". The original code let this
    // exception escape, aborting main() before spark.stop() could run; catch it
    // so the demonstration completes and the session is shut down cleanly.
    try {
      spark.newSession().sql("select * from  table1").show()
    } catch {
      case e: org.apache.spark.sql.AnalysisException =>
        println(s"Expected failure (temp view is session-scoped): ${e.getMessage}")
    }

    spark.stop()
  }
}
