package org.example
import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary
import com.hankcs.hanlp.summary.TextRankKeyword
import com.hankcs.hanlp.tokenizer.StandardTokenizer
import org.apache.spark.sql.{SparkSession, functions}

import scala.collection.convert.ImplicitConversions.`collection AsScalaIterable`
import scala.jdk.CollectionConverters.CollectionHasAsScala
import scala.xml.NodeSeq.Empty.text

object data1_words {

  /**
   * Demo entry point combining a Spark RDD word count with several HanLP
   * Chinese-NLP examples (segmentation, stop-word filtering, keyword extraction).
   *
   * Runs against a local Spark master; reads `src/main/resources/word.txt`.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // Classic word count: split on single spaces, pair each token with 1, sum counts.
    // NOTE: foreach(println) prints on executors; fine here because master is local[*].
    sc.textFile("src/main/resources/word.txt")
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .foreach(println)

    // HanLP default segmenter on a raw Chinese sentence.
    val chinese1 = HanLP.segment("身高一米七八年收入15万人老实话不多")
    println(chinese1)
    println(chinese1.asScala.map(_.word.trim))

    // Standard tokenizer; strip all whitespace from each token's surface form.
    val chinese2 = StandardTokenizer.segment("清明节放假++五一劳动节放假++暑假")
    println(chinese2)
    println(chinese2.asScala.map(_.word.replaceAll("\\s+", "")))

    // TextRank keyword extraction: top 5 keywords of the sentence.
    TextRankKeyword.getKeywordList("速看！广东这5个地方要拆迁、征收位置、面积、补偿和安置标准都以公布，快俩看看有你家吗", 5)
      .forEach(println)

    // Split a log-style line on whitespace and strip the square brackets
    // around the third field (the "[spark课程]" tag).
    val chinese3 = """16:00:00 pm [spark课程] www.baidu.com""".split("\\s+")
    println(chinese3(2).replaceAll("\\[|\\]", ""))

    // Per-record NLP inside an RDD: (original text, top-5 keywords, filtered tokens).
    val textArr = Array(
      "su7爆燃遇难者母亲清空相关微博",
      "甲亢哥被重庆甜妹教育了",
      "胡歌老婆是他的前助理",
      "金价在起飞了",
      "你能接受与伴侣的年龄差是多少"
    )
    val textRDD = sc.parallelize(textArr)
    val textResult = textRDD.map {
      text =>
        val keyword = TextRankKeyword.getKeywordList(text, 5).toString
        val words = transform(text)
        (text, keyword, words)
    } // RDD[(String, String, List[String])]
    textResult.take(1).foreach(println)

    // Release the local Spark context (was previously leaked).
    spark.stop()
  }

  /**
   * Segments a Chinese sentence with the standard tokenizer, removes stop
   * words, and returns the remaining tokens with literal spaces stripped.
   *
   * @param sentense the input sentence (parameter name kept — although
   *                 misspelled, it is part of the public interface because
   *                 Scala callers may use named arguments)
   * @return the filtered token surface forms, in order
   */
  def transform(sentense: String): List[String] = {
    val list = StandardTokenizer.segment(sentense)
    // Mutates `list` in place, removing entries found in the core stop-word dictionary.
    CoreStopWordDictionary.apply(list)
    // Explicit .asScala instead of the deprecated `collection AsScalaIterable`
    // implicit conversion the original relied on.
    list.asScala.map(_.word.replaceAll(" ", "")).toList
  }

}
