package org.example

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary
import com.hankcs.hanlp.summary.TextRankKeyword
import com.hankcs.hanlp.tokenizer.StandardTokenizer
import org.apache.spark.sql.SparkSession

import scala.collection.convert.ImplicitConversions.`collection AsScalaIterable`
import scala.jdk.CollectionConverters.CollectionHasAsScala

object ketang2 {
  /**
   * Entry point. Spins up a local SparkSession, then runs an in-class
   * exercise: for each Chinese headline, extract the top-5 TextRank keywords
   * and produce a stop-word-filtered token list.
   *
   * Earlier classroom examples (word count, segmentation, keyword extraction)
   * are kept below as commented-out reference code.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("spark").getOrCreate()
    val sc = spark.sparkContext
    /*// English word-frequency count
    // NOTE(review): split("") splits the line into single characters, not
    // words — split(" ") or split("\\s+") is probably what was intended.
    sc.textFile("src/main/resources/word.txt")
      .flatMap(_.split(""))
      .map(x =>(x,1))
      .reduceByKey((x,y) =>x +y)
      .sortBy(_._2,true)
      .foreach(println)*/

    /*// Chinese word segmentation
    val chinese1=HanLP.segment("张三上午没吃早餐，中午想吃牛肉面")
    println(chinese1)
    println(chinese1.asScala.map(_.word.trim))
    // Standard tokenizer
    val chinese2=StandardTokenizer.segment("明天放假++清明节++过三周还有假期++五一劳动节")
    println(chinese2)
    println(chinese2.asScala.map(_.word.replaceAll("\\s+","")))
    // Keyword extraction
    TextRankKeyword.getKeywordList("今天是个好日子耶，天气晴朗，阳光明媚，下午上完课就是三天假期，太爽了",5)
      .forEach(println)
    // Keyword filtering
    val chinese3="""11:20:00 23510206030214 [spark大数据] www.baidu.com""".split("\\s+")
    println(chinese3(2).replaceAll("\\[|\\]",""))*/
    //sc.stop()

    // In-class exercise: per-headline keyword extraction and tokenization.
    val textArr = Array(
      "su7爆燃遇难者母亲清空相关微博",
      "甲亢哥被重庆甜妹教育了",
      "胡歌老婆是他前助理",
      "金价再起飞",
      "你能接受与伴侣的年龄差是多少",
      "我的学号是14号，姓名是何佳佳"
    )
    val textRDD = sc.parallelize(textArr)
    // For each text build (original text, top-5 TextRank keywords, filtered tokens).
    val textResult = textRDD.map {
      text =>
        val keyword = TextRankKeyword.getKeywordList(text, 5).toString
        val words = transform(text)
        (text, keyword, words)
    } // RDD[(String, String, List[String])]
    textResult.take(6).foreach(println)
    // Release the SparkSession (and its SparkContext) before exiting; the
    // original left this out, leaking the local context at shutdown.
    spark.stop()
  }

  /**
   * Segments a sentence with HanLP's StandardTokenizer, removes stop words
   * (CoreStopWordDictionary.apply mutates the Java list in place), and
   * returns the surviving surface forms with spaces stripped.
   *
   * @param sentense the input sentence (parameter name kept as-is so callers
   *                 using named arguments keep compiling)
   * @return the retained token words, spaces removed
   */
  def transform(sentense: String): List[String] = {
    val list = StandardTokenizer.segment(sentense)
    // In-place stop-word filtering on the Java list returned by HanLP.
    CoreStopWordDictionary.apply(list)
    // Use the explicit .asScala converter (scala.jdk.CollectionConverters,
    // imported at the top of the file) rather than the deprecated implicit
    // `collection AsScalaIterable` conversion the original relied on.
    list.asScala.map(x => x.word.replaceAll(" ", "")).toList
  }

}
