package org.example

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary
import com.hankcs.hanlp.summary.TextRankKeyword
import com.hankcs.hanlp.tokenizer.StandardTokenizer
import org.apache.spark.sql.SparkSession

import scala.collection.convert.ImplicitConversions.`collection AsScalaIterable`
import scala.jdk.CollectionConverters._


object word {

  /**
   * Entry point: builds a local SparkSession, runs HanLP TextRank keyword
   * extraction and stop-word-filtered segmentation over a small in-memory
   * corpus of Chinese headlines, and prints the first result tuple.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext
    try {
      // English word-frequency count (kept for reference):
      //sc.textFile("src/main/resources/word.txt")
      //  .flatMap(_.split(" "))
      //  .map(x => (x, 1))
      //  .reduceByKey((x, y) => x + y)
      //  .foreach(println)
      // Chinese segmentation experiments (kept for reference):
      //val chinese1 = HanLP.segment("张三上午没吃饭，晚上吃大餐")
      //println(chinese1)
      //val abc = StandardTokenizer.segment("明天清明放假+五一假期+暑假放假")
      //println(abc)
      //println(abc.asScala.map(_.word.replaceAll("\\s+","")))
      //val abc3 = """11:20:00 23510206030 [spark大数据] www.baidu.com""".split("\\s+")
      //println(abc3(2).replaceAll("\\[|\\]",""))

      // Sample corpus of Chinese headlines (literal text must stay unchanged).
      val textArr = Array(
        "su7爆燃遇难者母亲清空相关微博",
        "甲亢哥被重庆甜妹教育了",
        "胡歌老婆是他前助理",
        "金价再起飞",
        "你能接受与伴侣的年龄差是多少"
      )
      val textRDD = sc.parallelize(textArr)

      // For each headline: the top-5 TextRank keywords (rendered via toString,
      // as before) plus the cleaned token list from transform().
      val textResult = textRDD.map { text =>
        val keyword = TextRankKeyword.getKeywordList(text, 5).toString
        val words = transform(text)
        (text, keyword, words)
      }
      textResult.take(1).foreach(println)
    } finally {
      // FIX: the session was never stopped (the old sc.stop() was commented
      // out), leaking the local Spark cluster. Always release it on exit.
      spark.stop()
    }
  }

  /**
   * Segments a sentence with HanLP's StandardTokenizer, removes stop words,
   * and returns the remaining surface forms with whitespace stripped.
   *
   * @param sentense raw input sentence (parameter name kept for compatibility)
   * @return segmented, stop-word-filtered tokens in original order
   */
  def transform(sentense: String): List[String] = {
    val list = StandardTokenizer.segment(sentense)
    // Mutates `list` in place, dropping entries found in HanLP's core
    // stop-word dictionary.
    CoreStopWordDictionary.apply(list)
    // FIX: use the explicit, non-deprecated .asScala conversion instead of the
    // deprecated ImplicitConversions wildcard, and strip all whitespace
    // (\\s+ — consistent with the commented experiment above), not just ' '.
    list.asScala.map(_.word.replaceAll("\\s+", "")).toList
  }
}