package sparkCode

import org.apache.spark.{SparkConf, SparkContext}

object FileRDD {
  /** Reads an access log and prints the most popular teacher.
    *
    * Each log line is expected to contain one or more URLs of the shape
    * "http://&lt;host&gt;/&lt;teacher&gt;"; the first path segment after the host is
    * treated as the teacher name. The program counts occurrences per teacher
    * and prints the (name, count) pair with the highest count.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("hello scala")
    // 2. Build the SparkContext (sc) — the driver's handle to the cluster.
    val sc = new SparkContext(conf)
    // Canonical Spark log-level spelling is upper-case.
    sc.setLogLevel("ERROR")

    // Read the data file.
    //val fileRDD = sc.textFile("D:\\Scala\\data.txt")    // word frequency per product category
    val fileRDD = sc.textFile("D:\\Scala\\teacher.log")   // find the most popular teacher

    println("--------------")
    // Extract teacher names and count occurrences.
    // collect{} keeps only fragments whose split actually produced a path
    // segment after the host. The previous filter(_.contains("/")) +
    // split("/")(1) combination crashed with ArrayIndexOutOfBoundsException on
    // a fragment like "host/": String.split drops trailing empty strings, so
    // the result had length 1 even though the fragment contained "/".
    val teacherCounts = fileRDD
      .flatMap(_.split("http://"))
      .map(_.split("/"))
      .collect { case parts if parts.length > 1 => (parts(1), 1) }
      .reduceByKey(_ + _)

    // reduce() on an empty RDD throws UnsupportedOperationException, so guard
    // against an empty or pattern-free input file.
    if (teacherCounts.isEmpty()) {
      println("no teachers found in input")
    } else {
      // Find the teacher with the highest count.
      val mostFrequentTeacher = teacherCounts
        .reduce((a, b) => if (a._2 > b._2) a else b)

      // Print the result as "(name, count)".
      println(s"(${mostFrequentTeacher._1}, ${mostFrequentTeacher._2})")
    }

    // Stop the SparkContext.
    sc.stop()

    /*
    fileRDD.flatMap(_.split("\\\\00A")).map(_.split(" ")).filter(_.length > 2).map(_(2)).map((_, 1)).reduceByKey(_ + _).foreach(println(_))
     */
    /*    // word frequency per product category
    val filterElements = Set("玩具", "家电","服装") // the set of category strings to keep
    fileRDD.flatMap(line => line.split("\\\\00A")).
      filter(item => filterElements.exists(_ == item)).map((_,1)).reduceByKey((_+_)).foreach(println(_))
    */
    }
}