package sparkSql.dataframe

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Dataset, Row}
//import org.apache.spark.sql.SparkSession
//import org.slf4j.LoggerFactory


/**
  * Dataframe 底层是RDD，Dataset底层是Dataframe
  *
  * session 狭义表示的是一种范围（把数据存到session中），广义表示的是一种状态（处于一个session）
  *
  */
object TestDataframe {
	
	import org.apache.spark.sql.SparkSession
	
	/**
	  * Demonstrates temp views vs. global temp views and their session scoping.
	  * Reads a JSON file, registers it as a view, and queries it with SQL and DSL.
	  */
	def main(args: Array[String]): Unit = {
		
		val conf: SparkConf = new SparkConf().setAppName("slq").setMaster("local[*]")
		val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
		import spark.implicits._
		
		// Read a JSON file; Spark infers the schema from the data.
		val df: DataFrame = spark.read.json("testData/input/word/test.json")
		
		// Create a session-scoped, read-only temporary view.
		df.createTempView("person")
		// FIX: spark.sql is lazy — without an action (show) the query was never executed.
		spark.sql("select name from person").show()
		
		val sparkSession: SparkSession = spark.newSession()
		// sparkSession.sql("select name from person") // would fail in the new session:
		// "Table or view not found: person" — temp views are scoped to the creating session.
		
		// A global temp view is visible to all sessions of this application,
		// but must be qualified with the reserved database name `global_temp`.
		df.createGlobalTempView("personGlobal")
		spark.sql("select name from global_temp.personGlobal").show()
		
		// DSL-style query (no SQL string).
		val df_name: DataFrame = df.select("name")
		df_name.show()
		
		spark.close()
	}
}

/**
  * DSL 风格的SQL
  */
object TestDataframe1 {
	
	import org.apache.spark.sql.SparkSession
	
	/**
	  * Demonstrates DSL-style (non-SQL-string) queries: column expressions and filters.
	  */
	def main(args: Array[String]): Unit = {
		
		val conf: SparkConf = new SparkConf().setAppName("slq").setMaster("local[*]")
		
		val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
		
		// Read a JSON file; Spark infers the schema from the data.
		val df: DataFrame = spark.read.json("testData/input/word/test.json")
		
		// Register a session-scoped, read-only temporary view.
		// NOTE(review): this view is never queried below — kept for demonstration only.
		df.createTempView("person")
		
		// DSL-style projection with a column expression (age + 1).
		df.select(df.col("age") + 1, df.col("name")).show()
		
		// DSL-style filter.
		df.filter(df.col("age") > 13).show()
		
		// FIX: the SparkSession was never closed, leaking the underlying SparkContext.
		spark.close()
	}
}

/**
  * RDD 和 DataFrame 之间的转换
  * 注意：需要引入 import spark.implicits._;spark 指的是sparkSession对象
  */
object TestDataframe2 {
	
	import org.apache.spark.sql.SparkSession
	
	/**
	  * Demonstrates RDD -> DataFrame conversion via `toDF`, supplying column names.
	  * Requires `import spark.implicits._` (where `spark` is the SparkSession).
	  */
	def main(args: Array[String]): Unit = {
		
		val conf: SparkConf = new SparkConf().setAppName("slq").setMaster("local[*]")
		
		val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
		// Brings in the implicit conversions that add `toDF` to RDDs.
		import spark.implicits._
		
		// A plain RDD of Ints: one column, named explicitly in toDF.
		val listRDD: RDD[Int] = spark.sparkContext.makeRDD(Array(1, 2, 3))
		
		val df: DataFrame = listRDD.toDF("id")
		df.show()
		
		// A pair RDD becomes a two-column DataFrame; names are positional.
		val pairRDD: RDD[(String, Int)] = spark.sparkContext.makeRDD(Array(("zs", 1), ("zs2", 2), ("zs3", 3)))
		pairRDD.toDF("name", "id").show()
		
		// FIX: the SparkSession was never closed, leaking the underlying SparkContext.
		spark.close()
	}
}

/**
  * RDD 和 DataFrame 之间的转换2
  */
case class Person(name: String, age: Int)

object TestDataframe3 {
	
	import org.apache.spark.sql.SparkSession
	
	/**
	  * Demonstrates RDD[case class] -> DataFrame (schema taken from the case class
	  * fields, so no column names needed) and DataFrame -> RDD[Row].
	  */
	def main(args: Array[String]): Unit = {
		
		val conf: SparkConf = new SparkConf().setAppName("slq").setMaster("local[*]")
		
		val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
		// Brings in the implicit conversions that add `toDF` to RDDs.
		import spark.implicits._
		
		val rdd: RDD[(String, Int)] = spark.sparkContext.makeRDD(Array(("zhangsan", 20), ("lisi", 30), ("wangwu", 40)))
		
		val personRDD: RDD[Person] = rdd.map(p => Person(p._1, p._2))
		// Person already carries the schema, so no column names are passed to toDF.
		val personDF: DataFrame = personRDD.toDF()
		personDF.show()
		
		// Converting back yields an untyped RDD of org.apache.spark.sql.Row.
		val rowRDD: RDD[Row] = personDF.rdd
		// FIX: actually materialize the round-trip so the conversion is exercised.
		rowRDD.collect().foreach(println)
		
		// FIX: the SparkSession was never closed, leaking the underlying SparkContext.
		// (Also removed the unused `listRDD` local present in the original.)
		spark.close()
	}
}

/**
  * DataSet 的基本操作
  * DataFrame 只有结构的概念，没有属性的概念
  * DataSet 是具有强类型的数据结构
  */

object TestDataframe4 {
	
	/** Local schema class shadowing the file-level Person; kept for the Dataset demo. */
	case class Person(name: String, age: Int)
	
	import org.apache.spark.sql.SparkSession
	
	/**
	  * Demonstrates Dataset basics: a DataFrame only has a schema (untyped Rows),
	  * while a Dataset is strongly typed. Shows RDD -> Dataset -> RDD round-trip.
	  */
	def main(args: Array[String]): Unit = {
		
		val conf: SparkConf = new SparkConf().setAppName("slq").setMaster("local[*]")
		
		val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
		// Brings in the implicit conversions/encoders that add `toDS` to RDDs.
		import spark.implicits._
		
		val rdd: RDD[(String, Int)] = spark.sparkContext.makeRDD(Array(("zhangsan", 20), ("lisi", 30), ("wangwu", 40)))
		
		val personRDD: RDD[Person] = rdd.map(p => Person(p._1, p._2))
		
		// A Dataset has both a schema and a static element type.
		val personDS: Dataset[Person] = personRDD.toDS()
		personDS.show()
		personDS.createTempView("person")
		
		// Converting back preserves the element type: RDD[Person], not RDD[Row].
		val rowRDD: RDD[Person] = personDS.rdd
		
		spark.sql("select name, age from person").show()
		
		// FIX: the SparkSession was never closed, leaking the underlying SparkContext.
		// (Also removed the unused `listRDD` local present in the original.)
		spark.close()
	}
}

/**
  * RDD -> DataFrame -> DataSet
  */

object TestDataframe5 {
	
	/** Local schema class shadowing the file-level Person; kept for the Dataset demo. */
	case class Person(name: String, age: Int)
	
	import org.apache.spark.sql.SparkSession
	
	/**
	  * Demonstrates the full chain RDD -> DataFrame -> Dataset, plus `$`-column DSL.
	  */
	def main(args: Array[String]): Unit = {
		
		val conf: SparkConf = new SparkConf().setAppName("sql").setMaster("local[*]")
		
		val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
		// Brings in `toDF`, `as[T]` encoders, and the `$"col"` string interpolator.
		import spark.implicits._
		
		val rdd: RDD[(String, Int)] = spark.sparkContext.makeRDD(Array(("zhangsan", 20), ("lisi", 30), ("wangwu", 40)))
		
		val personRDD: RDD[Person] = rdd.map(p => Person(p._1, p._2))
		
		// RDD -> DataFrame (explicit column names), then DataFrame -> typed Dataset.
		val personDF: DataFrame = personRDD.toDF("name", "age")
		val personDS: Dataset[Person] = personDF.as[Person]
		
		personDS.createTempView("person")
		
		spark.sql("select name, age from person").show()
		
		personDF.select($"name").show()
		
		// FIX: the original selected $"" (an empty column name), which throws an
		// AnalysisException at runtime, and never ran an action on the result.
		spark.read.json("testData/input/word/test.json").select($"name").show()
		
		// FIX: the SparkSession was never closed, leaking the underlying SparkContext.
		// (Also removed the unused `listRDD` local present in the original.)
		spark.close()
	}
}