package org.example
import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary
import com.hankcs.hanlp.summary.TextRankKeyword
import com.hankcs.hanlp.tokenizer.StandardTokenizer
import org.apache.spark.sql.{SparkSession, functions}

import scala.collection.convert.ImplicitConversions.`collection AsScalaIterable`
import scala.jdk.CollectionConverters._

object zuoy {

  /**
   * Entry point. Demonstrates:
   *  1. a character-frequency word count over a local text file with Spark,
   *  2. HanLP segmentation / keyword extraction on sample Chinese text,
   *  3. distributed keyword + stop-word-filtered segmentation over a small RDD.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("spark").getOrCreate()
    try {
      val sc = spark.sparkContext

      // Character-level frequency count: split("") yields one element per character,
      // which is the usual counting granularity for unsegmented Chinese text.
      sc.textFile("src/main/resources/words.txt")
        .flatMap(_.split(""))
        .map(ch => (ch, 1))
        .reduceByKey(_ + _)
        .foreach(println)

      // HanLP segmentation demos. The originals computed these and discarded them;
      // print the results so the calls are not dead code.
      val chinese1 = HanLP.segment("张三上午没吃早餐，中午想吃牛肉")
      println(chinese1)
      val chinese2 = StandardTokenizer.segment("明天放假++清明节++过三周还有假期++五一劳 动节")
      println(chinese2)
      println(TextRankKeyword.getKeywordList("速看！广东这5个地方将要拆迁，征收位置、面积、补偿和安 置标准都已公布，快来看看有你家吗", 5))

      // Take the third whitespace-separated field and strip its square brackets.
      val chinese3 = """11:20:00 23510206030 [spark大数据] www.baidu.com""".split("\\s+")
      println(chinese3(2).replaceAll("\\[|\\]", ""))

      val textArr = Array(
        "su7爆燃遇难者母亲清空相关微博",
        "甲亢哥被重庆甜妹教育了",
        "胡歌老婆是他前助理",
        "金价再起飞",
        "你能接受与伴侣的年龄差是多少"
      )
      val textRDD = sc.parallelize(textArr)
      // For each headline: top-5 TextRank keywords plus stop-word-filtered tokens.
      val textResult = textRDD.map { text =>
        val keyword = TextRankKeyword.getKeywordList(text, 5).toString
        val words = transform(text)
        (text, keyword, words)
      }
      textResult.take(1).foreach(println)
    } finally {
      // Always release the SparkContext, even if a stage above throws
      // (the original leaked the session on failure).
      spark.stop()
    }
  }

  /**
   * Segments a sentence with HanLP's StandardTokenizer, removes stop words,
   * and returns the remaining surface forms with internal spaces stripped.
   *
   * @param sentense input text (parameter name kept for source compatibility)
   * @return cleaned word strings, in original token order
   */
  def transform(sentense: String): List[String] = {
    val terms = StandardTokenizer.segment(sentense)
    // CoreStopWordDictionary.apply mutates `terms` in place, dropping stop-word entries.
    CoreStopWordDictionary.apply(terms)
    // Explicit .asScala instead of the deprecated ImplicitConversions wildcard import.
    terms.asScala.map(_.word.replaceAll(" ", "")).toList
  }
}
