package cn.aijson.demo.nlp

import com.hankcs.hanlp.HanLP
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.JavaConverters

object SougoNlpAnalysis {

  /**
   * Strip the surrounding square brackets from a raw query string and
   * segment it into individual words with HanLP.
   * e.g. "[360安全卫士]" ==> Seq("360", "安全卫士")
   */
  private def segmentQuery(queryWords: String): Seq[String] = {
    import scala.collection.JavaConverters._ // convert the Java list returned by HanLP to a Scala collection
    val plain = queryWords.replaceAll("\\[|\\]", "")
    HanLP.segment(plain).asScala.map(_.word)
  }

  /** Punctuation tokens emitted by the segmenter that should not count as search words. */
  private def isRealWord(word: String): Boolean = word != "." && word != "+"

  def main(args: Array[String]): Unit = {
    // 1. Prepare the Spark context (local mode, all available cores).
    val conf: SparkConf = new SparkConf()
      .setAppName("sogou-nlp-analysis") // fixed typo: was "workdcount"
      .setMaster("local[*]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    // 2. Load the raw log file as a distributed dataset of lines.
    val lines: RDD[String] = sc.textFile("data/sogou/SogouQ.sample")

    // Parse each whitespace-separated line into a SogouRecord.
    // Lines with fewer than 6 fields are skipped instead of throwing
    // ArrayIndexOutOfBoundsException.
    val sogouRecordRDD: RDD[SogouRecord] = lines
      .map(_.split("\\s+"))
      .filter(_.length >= 6)
      .map(arr => SogouRecord(arr(0), arr(1), arr(2), arr(3).toInt, arr(4).toInt, arr(5)))

    // 3. Compute the metrics.

    // 3.1 Top-10 hottest search words overall.
    val top10: Array[(String, Int)] = sogouRecordRDD
      .flatMap(record => segmentQuery(record.queryWords))
      .filter(isRealWord)
      .map((_, 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
      .take(10)
    top10.foreach(println)

    // 3.2 Top-10 hottest (userId, word) pairs: per-user hot search words.
    val userWords: Array[((String, String), Int)] = sogouRecordRDD
      .flatMap(record => segmentQuery(record.queryWords).map(word => (record.userId, word)))
      .filter(t => isRealWord(t._2))
      .map((_, 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
      .take(10)
    userWords.foreach(println)

    // 3.3 Search volume per time bucket — the "HH:mm" prefix of the query time.
    val timeHot: Array[(String, Int)] = sogouRecordRDD
      .map(record => (record.queryTime.substring(0, 5), 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
      .take(10)
    timeHot.foreach(println)

    // 3.4 Top-10 hottest (time bucket, word) pairs.
    val timeHotWords = sogouRecordRDD
      .flatMap { record =>
        val bucket = record.queryTime.substring(0, 5)
        segmentQuery(record.queryWords).map(word => (bucket, word))
      }
      .filter(t => isRealWord(t._2))
      .map((_, 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
      .take(10)
    timeHotWords.foreach(println)

    // 3.5 TODO: top-10 users by search activity (not yet implemented).

    sc.stop()
  }
}

//A case class used to encapsulate one parsed log record
/**
 * One record of a user's search-and-click on a result page.
 * @param queryTime  access time, formatted as HH:mm:ss
 * @param userId     user ID
 * @param queryWords query words
 * @param resultRank rank of this URL in the returned results
 * @param clickRank  ordinal of the user's click
 * @param clickUrl   URL the user clicked
 */
case class SogouRecord(
                        queryTime: String,
                        userId: String,
                        queryWords: String,
                        resultRank: Int,
                        clickRank: Int,
                        clickUrl: String
                      )
