package cn.wangjie.spark.convert

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DoubleType, LongType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
 * Converts an RDD to a DataFrame using a custom (explicit) schema:
 *      a. RDD[Row]
 *         b. Schema (StructType)
 *         c. RDD[Row] + Schema -> DataFrame
 */
object SparkRDDSchema {
	
	/**
	 * Entry point: loads the MovieLens ratings file (tab-separated
	 * "user_id\titem_id\trating\ttimestamp"), parses each valid line into a
	 * [[Row]], attaches an explicit [[StructType]] schema, and prints the
	 * resulting DataFrame.
	 */
	def main(args: Array[String]): Unit = {
		// Build the SparkSession via the builder pattern; local[2] runs with two threads.
		val spark: SparkSession = SparkSession.builder()
			// Application name derived from the object name (drop the trailing '$').
			.appName(this.getClass.getSimpleName.stripSuffix("$"))
			// Run mode: local with two worker threads.
			.master("local[2]")
			.getOrCreate()
		// NOTE: spark.implicits._ is intentionally not imported — no implicit
		// conversions are needed because the schema is supplied explicitly below.
		
		// Load the raw text data.
		val rawRatingsRDD: RDD[String] = spark.sparkContext.textFile("datas/ml-100k/u.data")
		
		// TODO: 1. Parse each line of the RDD into a Row.
		// Split each line exactly once; silently drop null/malformed lines
		// (anything that does not yield exactly 4 tab-separated fields),
		// matching the original filter-then-map behavior.
		val rowsRDD: RDD[Row] = rawRatingsRDD
			.mapPartitions { iter =>
				iter.flatMap { line =>
					Option(line)
						.map(_.trim.split("\\t"))
						.collect {
							case arr if arr.length == 4 =>
								Row(arr(0), arr(1), arr(2).toDouble, arr(3).toLong)
						}
				}
			}
		
		// TODO: 2. Define the schema explicitly; all fields nullable.
		val schema: StructType = StructType(
			StructField("user_id", StringType, nullable = true) ::
			StructField("item_id", StringType, nullable = true) ::
			StructField("rating", DoubleType, nullable = true) ::
			StructField("timestamp", LongType, nullable = true) :: Nil
		)
		
		// TODO: 3. Combine rowsRDD and schema into a DataFrame.
		val ratingsDF: DataFrame = spark.createDataFrame(rowsRDD, schema)
		ratingsDF.printSchema()
		ratingsDF.show()
		
		// Application finished — release resources.
		spark.stop()
	}
}
