import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.summary.TextRankKeyword
import com.hankcs.hanlp.tokenizer.StandardTokenizer
import org.apache.spark.api.java.JavaRDD.fromRDD
import org.apache.spark.sql.SparkSession

import scala.jdk.CollectionConverters.CollectionHasAsScala

/**
 * Demo combining a basic Spark RDD word count with HanLP Chinese NLP:
 * segmentation, standard tokenization, TextRank keyword extraction,
 * and a simple token-filtering example.
 */
object data1_words {

  /**
   * Entry point.
   *
   * @param args optional; `args(0)` may override the input text file path.
   *             When absent, falls back to the original local resource path.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("WordCount-HanLP-Demo") // was "Spark Pi" — a copy-paste leftover
      .getOrCreate()
    val sc = spark.sparkContext

    // Input path is now overridable from the command line; defaults to the
    // previously hard-coded location so existing invocations still work.
    val inputPath = args.headOption
      .getOrElse("F:\\Spark\\karry\\karry\\src\\main\\resources\\word.txt")

    // Word count: split lines on spaces, pair each word with 1, sum per key.
    // collect() first so println runs on the driver — calling foreach(println)
    // on the RDD prints from executor task threads in nondeterministic order.
    sc.textFile(inputPath)
      .flatMap(_.split(" "))   // flat-map each line into individual words
      .map(word => (word, 1))  // map to (key, value) tuples
      .reduceByKey(_ + _)      // aggregate counts per word
      .collect()
      .foreach(println)

    // Chinese word segmentation with HanLP's default segmenter.
    val chinese1 = HanLP.segment("身高一米七八收入15万人老实话不多")
    println(chinese1)
    println(chinese1.asScala.map(_.word.trim))

    // Standard tokenizer.
    val chinese2 = StandardTokenizer.segment("清明节+五一劳动节+暑假")
    println(chinese2)
    println(chinese2.asScala.map(_.word.replaceAll("\\s+", "")))

    // Keyword extraction: top 5 keywords via the TextRank algorithm.
    TextRankKeyword
      .getKeywordList("速看！广东这五个地方将要拆迁，征收位置、面积、补偿和安置标准都已经公布，快来看看有你家吗？", 5)
      .forEach(println)

    // Keyword filtering: take the third whitespace-separated token
    // and strip the surrounding square brackets.
    val chinese3 = """16:00:00 pm [spark课程] www.baidu.com""".split("\\s+")
    println(chinese3(2).replaceAll("\\[|\\]", ""))

    spark.stop() // stops the session and its underlying SparkContext
  }

}
