package org.example
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary
import com.hankcs.hanlp.summary.TextRankKeyword
import com.hankcs.hanlp.tokenizer.StandardTokenizer

import org.apache.spark.sql.SparkSession

import scala.collection.convert.ImplicitConversions.`collection AsScalaIterable`
import scala.jdk.CollectionConverters._
/**
 * Demo driver: runs HanLP keyword extraction and stop-word-filtered
 * tokenization over a small set of Chinese headlines via a local Spark job.
 */
object Words {
  def main(args: Array[String]): Unit = {
    // Create a local Spark session to drive the demo.
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext
    // English word-frequency count (earlier experiment, kept for reference)
    //    sc.textFile("src/main/resources/words.txt")
    //      .flatMap(_.split(""))
    //      .map(x => (x,1))
    //      .reduceByKey((x,y) => x+y)
    //      .foreach(println)
    // Chinese word segmentation (earlier experiment, kept for reference)
    //    val chinese1 = HanLP.segment("张三没吃早餐，中午也不想吃了")
    //    println(chinese1)
    //    println(chinese1.asScala.map(_.word.trim))
    // Standard tokenizer (earlier experiment, kept for reference)
//    val chinese2 = StandardTokenizer.segment("明天放假++清明节++过两周还有假期++五一劳动节")
//    println(chinese2)
//    println(chinese2.asScala.map(_.word.replaceAll("\\s+", "")))
//    // Keyword extraction (earlier experiment, kept for reference)
//    TextRankKeyword.getKeywordList("速看！马上要下课啦啦啦", 3)
//      .forEach(println)
//    // Keyword filtering (earlier experiment, kept for reference)
//    val chinese3 = """11:20:00 235102060302 [spark大数据] www.baidu.com""".split("\\s+")
//    println(chinese3(2).replaceAll("\\[|\\]", ""))
    val textArr = Array(
      "su7爆燃遇难者母亲清空相关微博",
      "甲亢哥被重庆甜妹教育了",
      "胡歌老婆是他前助理",
      "金价再起飞",
      "你能接受与伴侣的年龄差是多少"
    )
    val textRDD = sc.parallelize(textArr)
    // For each headline: top-5 TextRank keywords (as the Java list's toString)
    // plus the stop-word-filtered token list.
    val textResult = textRDD.map {
      text =>
        val keyword = TextRankKeyword.getKeywordList(text, 5).toString
        val words = transform(text)
        (text, keyword, words)
    }
    textResult.take(1).foreach(println)
    // FIX: release the SparkContext and its resources; the original leaked
    // the session by never stopping it.
    spark.stop()
  }

  /**
   * Tokenizes a sentence with HanLP's standard tokenizer, removes stop words,
   * and strips all whitespace from each remaining token.
   *
   * @param sentense raw input text (parameter name kept for caller compatibility)
   * @return cleaned token strings, stop words removed
   */
  def transform(sentense: String): List[String] = {
    val terms = StandardTokenizer.segment(sentense)
    // Mutates `terms` in place, dropping entries present in the core stop-word list.
    CoreStopWordDictionary.apply(terms)
    // FIX: the original pattern was the empty string "", a no-op replacement;
    // "\\s+" actually strips whitespace, matching the commented experiment in main.
    // Explicit .asScala (scala.jdk.CollectionConverters) replaces the deprecated
    // blanket implicit conversion.
    terms.asScala.map(_.word.replaceAll("\\s+", "")).toList
  }
}
