package cn.wangjie.spark.start

import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
 * 使用SparkSQL中DSL方式分析数据，进行词频统计WordCount
 */
/**
 * Word-count example built on the Spark SQL DSL (Dataset/DataFrame API)
 * rather than hand-written SQL: read lines, split into words, group and count.
 */
object SparkDSLWordCount {

  def main(args: Array[String]): Unit = {

    // Build (or reuse) the SparkSession via the builder pattern.
    // stripSuffix("$") removes the trailing '$' from the Scala object's class name.
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[2]") // local mode with two worker threads
      .getOrCreate()
    // Bring in encoders/implicit conversions needed by the typed Dataset API.
    import spark.implicits._

    // Load the raw text file; each element of the Dataset is one line.
    val lines: Dataset[String] = spark.read.textFile("datas/wordcount/wordcount.data")
    println(s"Count = ${lines.count()}")
    lines.show(5, truncate = false)

    // Analysis pipeline, equivalent to:
    //   SELECT word, count(1) AS total FROM words GROUP BY word
    val wordCounts: DataFrame = {
      // Drop null and blank lines before tokenizing.
      val nonEmpty = lines.filter(line => null != line && line.trim.length > 0)
      // Tokenize on runs of whitespace; flatMap yields a Dataset[String] of words.
      val words = nonEmpty.flatMap(line => line.trim.split("\\s+"))
      // The single column of a Dataset[String] is named "value" by convention.
      words.groupBy("value").count()
    }

    /*
      Resulting schema:
      root
       |-- value: string (nullable = true)
       |-- count: long (nullable = false)
     */
    wordCounts.printSchema()
    /*
      Sample output:
      +---------+-----+
      |value    |count|
      +---------+-----+
      |sql      |2    |
      |spark    |11   |
      |mapreduce|4    |
      |hdfs     |2    |
      |hadoop   |3    |
      |hive     |6    |
      +---------+-----+
     */
    wordCounts.show(10, truncate = false)

    // Intermediate-inspection calls kept for reference (disabled):
    // the raw Dataset's schema is a single nullable string column named "value".
    //tmpDS.printSchema()
    //tmpDS.show(10, truncate = false)

    // Release cluster resources when the application finishes.
    spark.stop()
  }
}
