package org.example

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary
import com.hankcs.hanlp.summary.TextRankKeyword
import com.hankcs.hanlp.tokenizer.StandardTokenizer
import org.apache.spark.sql.SparkSession

import scala.collection.convert.ImplicitConversions.`collection AsScalaIterable`
import scala.jdk.CollectionConverters.CollectionHasAsScala

object data1_words {

  /**
   * Demo entry point: English word-frequency count with Spark, then Chinese
   * segmentation and TextRank keyword extraction with HanLP.
   */
  def main(args: Array[String]): Unit = {
    // Local Spark session; "local[*]" uses all available cores.
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("SaveTocCSV")
      .getOrCreate()
    val sc = spark.sparkContext

    try {
      // English word-frequency count.
      // collect() brings the (small) result to the driver: RDD.foreach(println)
      // would run on executors, so on a real cluster nothing would print here.
      sc.textFile("src/main/resources/word.txt")
        .flatMap(_.split(" "))      // flat-map each line into words
        .map(word => (word, 1))
        .reduceByKey(_ + _)         // aggregate counts per word
        .collect()
        .foreach(println)

      // Chinese segmentation via HanLP's default segmenter.
      val chinese1 = HanLP.segment("身高一米七八年收入15万，人老实话不多")
      println(chinese1)
      println(chinese1.asScala.map(_.word.trim))

      // Chinese segmentation via the standard tokenizer.
      val chinese2 = StandardTokenizer.segment("乌拉呀哈耶嘿乌沙奇")
      println(chinese2)
      println(chinese2.asScala.map(_.word.replaceAll("\\s+", "")))

      // Keyword extraction: top 4 TextRank keywords.
      TextRankKeyword.getKeywordList("若暗夜降临，吾必立于万万人之上", 4)
        .asScala
        .foreach(println)
    } finally {
      // Always release Spark resources (the original never stopped the session).
      spark.stop()
    }
  }

}

//    //    关键词过滤
//    //    val chinese3 = """16:00:00 pm [spark课程] www.baidu.com""".split("\\s+")
//    //    println(chinese3(2).replace("\\[|\\",""))
//    //   关键词筛选
//    val chinese3 = """11:20:00 23510206030 [spark大数据] www.baidu.com""".split("\\s+")
//    println(chinese3(2).replaceAll("\\[|\\]", ""))
//    val textArr = Array("su7爆燃遇难者母亲清空相关微博", "甲亢哥被重庆甜妹教育了", "胡歌老婆是他前助理",
//      "金价再起飞", "你能接受与伴侣的年龄差是多少")
//    val textRDD = sc.parallelize(textArr)
//    val textResult = textRDD.map { text =>
//      val keyword = TextRankKeyword.getKeywordList(text, 5).toString
//      val words = transform(text)
//      (text, keyword, words)
//    } // RDD[(String, String, List[String])]
//    textResult.take(1).foreach(println)
//    sc.stop()
//  }
//  // 结果转换，可以不显示词性
//  def transform(sentense: String): List[String] = {
//    val list = StandardTokenizer.segment(sentense)
//    CoreStopWordDictionary.apply(list)
//    list.map(x => x.word.replaceAll(" ", "")).toList