package cn.wangjie.spark.search

import com.hankcs.hanlp.HanLP
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.JavaConverters._
import scala.collection.mutable

/**
 * Batch analysis of Sogou search-engine query logs using Spark Core (RDD API).
 *
 * Reads raw log lines, parses them into [[SogouRecord]] instances, then runs
 * three analyses:
 *   1. Top-10 search keywords (after HanLP word segmentation).
 *   2. Click counts per (user, query) pair, plus max/min/mean.
 *   3. Search counts grouped by hour of day.
 *
 * Expected log line format (6 whitespace-separated fields):
 *   queryTime userId [queryWords] resultRank clickRank clickUrl
 */
object SogouQueryAnalysis {
	
	def main(args: Array[String]): Unit = {
		// Build the SparkContext driver instance.
		val sc: SparkContext = {
			// 1.a Create SparkConf: application name and run mode (local, 2 threads).
			val sparkConf = new SparkConf()
				.setAppName(this.getClass.getSimpleName.stripSuffix("$"))
				.setMaster("local[2]")
			// 1.b Reuse an existing context if one is already running.
			SparkContext.getOrCreate(sparkConf)
		}
		
		// TODO: 1. Load the raw log data.
		//val rawLogsRDD: RDD[String] = sc.textFile("datas/sogou/SogouQ.sample", minPartitions = 2)
		val rawLogsRDD: RDD[String] = sc.textFile("datas/sogou/SogouQ.reduced", minPartitions = 2)
		//println(s"Count = ${rawLogsRDD.count()} \n First: ${rawLogsRDD.first()}")
		
		// TODO: 2. ETL: extract fields, parse and wrap each line into a SogouRecord.
		val recordsRDD: RDD[SogouRecord] = rawLogsRDD
			// Drop malformed lines (must split into exactly 6 fields).
			.filter(line => null != line && line.trim.split("\\s+").length == 6)
			// Parse and wrap. mapPartitions avoids per-record closure overhead.
			.mapPartitions{iter =>
				iter.map{line =>
					val arr: Array[String] = line.trim.split("\\s+")
					SogouRecord(
						arr(0), arr(1), arr(2).replaceAll("\\[|\\]", ""), // strip [ ] around query words
						arr(3).toInt, arr(4).toInt, arr(5)
					)
				}
			}
		// All subsequent analyses reuse this RDD, so cache it; count() forces materialization.
		recordsRDD.persist(StorageLevel.MEMORY_AND_DISK).count()
		//println(recordsRDD.first())
		
		// TODO: =================== Search keyword statistics ===================
		// Segment each query with HanLP, count occurrences per word, take the Top-10.
		val wordsRDD: RDD[String] = recordsRDD.mapPartitions{ iter =>
			iter.flatMap{record =>
				val keyword: String = record.queryWords
				// Segment the query string with HanLP.
				val words: mutable.Seq[String] = HanLP.segment(keyword).asScala.map(term => term.word)
				// Emit one element per segmented word.
				words
			}
		}
		// Word-count, then sort by count descending.
		val top10SearchWords: Array[(Int, String)] = wordsRDD
			.map(word => (word, 1))
			.reduceByKey((tmp, item) => tmp + item)
			// Swap to (count, word) so sortByKey orders by count.
			.map(tuple => tuple.swap)
			.sortByKey(ascending = false)
			// Top-10 search words. BUGFIX: was take(15), inconsistent with the
			// variable name and the intended Top-10 result.
			.take(10)
		top10SearchWords.foreach(println)
		
		// TODO: ================== User click statistics ==================
		// Count page clicks per user per query word.
		val clickCountRDD: RDD[((String, String), Int)] = recordsRDD
			// Trick: use the (userId, queryWords) pair as the key — grouping and
			// aggregation happen in a single reduceByKey.
			.map(record => ((record.userId, record.queryWords), 1))
			.reduceByKey(_ + _)
		clickCountRDD
			.sortBy(tuple => tuple._2, ascending = false)
			.take(10)
			.foreach(println)
		println(s"最大次数：${clickCountRDD.values.max()}")
		println(s"最小次数：${clickCountRDD.values.min()}")
		println(s"平均次数：${clickCountRDD.values.mean()}")
		
		// TODO: ================== Search counts by hour ==================
		recordsRDD
			.mapPartitions{iter =>
				iter.map{record =>
					// Query time "HH:mm:ss" -> hour prefix "HH".
					val hour: String = record.queryTime.substring(0, 2)
					(hour, 1)
				}
			}
			.reduceByKey(_ + _) // Aggregate counts per hour.
			// Sort by count descending.
			.sortBy(tuple => tuple._2, ascending = false)
			.foreachPartition(iter => iter.foreach(println))
		
		// TODO: SELECT xx, xx ,count(1) AS total FROM records GROUP BY xx, xx ORDER BY total LIMIT 10 ;
		
		// Release the cached RDD once it is no longer needed.
		recordsRDD.unpersist()
		
		// Application done: release resources.
		sc.stop()
	}
	
}
