package org.example

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.tokenizer.StandardTokenizer
import org.apache.spark.sql.SparkSession

/**
 * Demo entry point: a Spark word count over a local text file, followed by
 * small HanLP segmentation experiments and a whitespace-split/log-field demo.
 */
object WordCount {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("sparkBase")
      .getOrCreate()
    val sc = spark.sparkContext

    // Word count: split each line on runs of whitespace (the original split("")
    // broke lines into single characters, counting characters rather than words).
    // collect() brings results to the driver so println output is reliable even
    // outside local mode (RDD.foreach runs on executors).
    sc.textFile("src/main/resources/words.txt")
      .flatMap(_.split("\\s+"))
      .filter(_.nonEmpty)              // guard against empty tokens from leading whitespace
      .map(word => (word, 1))
      .reduceByKey(_ + _)
      .collect()
      .foreach { case (word, count) => println(s"$word,$count") }

    // HanLP default segmentation of a Chinese sentence.
    val chinese = HanLP.segment("严守阵地一天，现在特别想吃皮蛋瘦肉粥")
    println(chinese)
//    println(chinese.asScala.map(_.word.trim))

    // Standard tokenizer: note how non-word symbols ("++") are tokenized.
    val terms = StandardTokenizer.segment("放假++清明++五一")
    println(terms)
//    println(terms.asScala.map(_.word.replace("\\s+","")))

    // Sample log line: split on whitespace, then strip the square brackets
    // around the query field (index 2).
    val words = """00:00:00 23510206 [spark大数据] 4 4 https://www.baidu.com"""
      .split("\\s+")
    println(words(2).replaceAll("\\[|\\]", ""))

    // spark.stop() also stops the underlying SparkContext; the original
    // sc.stop() left the SparkSession itself open.
    spark.stop()
  }

}
