package cn.wangjie.spark.todf

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{ColumnName, DataFrame, SparkSession}

/**
 * Demonstrates the implicit `toDF` conversion: turning a `Seq` of tuples and an
 * `RDD` of tuples into DataFrames via `spark.implicits._`.
 */
object SparkSQLToDF {

  // Sample data shared by both examples: (id, name, age) tuples.
  // Previously this literal was duplicated in two places below.
  private val users: Seq[(Int, String, Int)] = Seq(
    (10001, "zhangsan", 23),
    (10002, "lisi", 22),
    (10003, "wangwu", 23),
    (10004, "zhaoliu", 24)
  )

  def main(args: Array[String]): Unit = {
    // Create the SparkSession via the builder pattern.
    val spark: SparkSession = SparkSession.builder()
      // Application name derived from this object's class name
      // (strip the trailing '$' that Scala appends to object classes).
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      // Run locally with 2 worker threads.
      .master("local[2]")
      .getOrCreate()
    // Bring toDF and other implicit conversions into scope.
    import spark.implicits._

    // TODO: 1. Build an RDD of 3-tuples and convert it to a DataFrame.
    val usersRDD: RDD[(Int, String, Int)] = spark.sparkContext.parallelize(users)
    // Convert the RDD to a DataFrame with named columns.
    val usersDF: DataFrame = usersRDD.toDF("id", "name", "age")
    /*
      root
       |-- id: integer (nullable = false)
       |-- name: string (nullable = true)
       |-- age: integer (nullable = false)
     */
    usersDF.printSchema()
    usersDF.show(10, truncate = false)

    println("========================================================")

    // 2. Convert the same Seq of tuples directly to a DataFrame.
    val df: DataFrame = users.toDF("id", "name", "age")
    df.printSchema()
    df.show(10, truncate = false)

    // Application finished: release resources.
    spark.stop()
  }

}
