//package org.data
//
//import org.apache.spark.sql.SparkSession
//import org.apache.spark.{SparkConf, SparkContext}
//// NOTE(review): SparkConf was previously imported twice (once alone, once in the
//// selector clause) — merged into one import. Also, HanLP, StandardTokenizer,
//// TextRankKeyword and the `transform` helper used below have no imports/definitions
//// here; add the com.hankcs.hanlp imports before uncommenting this code.
//
//object word {
//  def main(args: Array[String]): Unit = {
//    val spark = SparkSession
//      .builder()
//      .master("local[*]") // Fixed: master URL is case-sensitive; "Local[*]" fails to parse at runtime
//      .appName("spark")
//      .getOrCreate()
//    val sc =spark.sparkContext
//    sc.textFile("src/main/resources/words.txt")
//      .flatMap(_.split(" "))
//      .map(x => (x, 1))
//      .reduceByKey((x, y) => x + y)
//
//
//    val chinese1 = HanLP.segment("张三上午没吃早餐，中午想吃牛肉面")
//    println(chinese1)
//
//    val chinese2 = StandardTokenizer.segment("明天放假+请调节+过三周还有假期+五一劳动节")
//    TextRankKeyword.getKeywordList("速看！广东这5个地方将要拆迁，征收位置、面积、补偿和安置标准都已公布，快来看看你家吗", 5)
//    val chinese3 =
//      """11:20:00 23510206030 [spark大数据]
//    www.baidu.com""".split("\\s+")
//    println(chinese3(2).replaceAll("\\[|\\]", ""))
//
//    val textArr = Array(
//      "suzu燃燒過電氣帶來清空相對微掃",
//      "甲方被處於崩塌狀態了",
//      "將乾燥產品輸出到車庫",
//      "金價再起飞",
//      "你能接受与伴侶的年龄差是多少"
//    )
//
//    val textRDD = sc.parallelize(textArr)
//    val textResult = textRDD.map(
//      text => {
//        val keyword = TextRankKeyword.getKeywordList(text, 5).toString
//        val words = transform(text)
//        (text, keyword, words)
//      }
//    )
//    textResult.take(1).foreach(println) // Fixed: changed printIn to println
////    //英文词频统计
////    sc.textFile("D:\\word.txt")
////      .flatMap(_.split(" ")) //扁平映射
////      .map(x => (x,1))
////      .reduceByKey((x,y) => x+y)
////      .foreach(println)
////    //中文分词
////    val chinese1 = HanLP.segme
////    sc.stop()
////    val conf = new SparkConf().setAppName("word").setMaster("local")
////    val scc = new SparkContext(conf)
////    val input = "D:\\word.txt"
////    val count = sc.textFile(input).flatMap(
////      x => x.split("")).map( x => (x,1)).reduceByKey((x,y) => x+y)
////    // NOTE(review): split("") tokenizes into single characters, not words —
////    // presumably intended for per-character (Chinese) counting; if word-level
////    // counting was meant (as in the block above), use split(" ") instead.
////    count.foreach(x => println(x._1 + "," + x._2))
//  }
//}
