package test

import org.apache.spark.sql.SparkSession


object ff {
  /** Entry point: runs a simple word-frequency count over a local text file.
    *
    * Reads `src/main/resources/word.txt`, splits each line on single spaces,
    * counts occurrences of each word, and prints the resulting
    * (word, count) pairs to stdout.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      // An application name is required: getOrCreate() throws
      // "An application name must be set in your configuration" without it.
      .appName("WordCount")
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext

    // English word-frequency count
    sc.textFile("src/main/resources/word.txt")
      .flatMap(_.split(" "))   // flatten each line into individual words
      .map(word => (word, 1))  // pair each word with an initial count of 1
      .reduceByKey(_ + _)      // aggregate: sum the counts per word
      .foreach(println)

    // Chinese word segmentation experiments (disabled pending the HanLP dependency)
    // val word1 = HanLP.segment("放假前还要上课啊啊啊啊")
    // println(word1)
    // println(word1.asScala.map(_.word.trim))
    // val word2 = StandardTokenizer.segment("不要不要下雨，不要下雨")
    // val word3 = StandardTokenizer.segment("太阳太阳太阳晴天晴天")
    // println(word2)
    // println(word3.asScala.map(_.word.replaceAll("\\s+","")))

    // spark.stop() also stops the underlying SparkContext, so the previous
    // separate sc.stop() call was redundant and has been removed.
    spark.stop()
  }
}
