package weibo

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

/**
 * Counts Weibo records per hour-of-day and prints the totals sorted by hour.
 *
 * Input records are fixed-width lines; the two-character hour field sits at
 * columns [77, 79). Lines are kept only if they contain a '#' topic marker.
 */
object HourKeyRate {
  // Fixed-width offsets of the hour field within a record.
  private val HourStart: Int = 77
  private val HourEnd: Int = 79

  def main(args: Array[String]): Unit = {
    val sc: SparkContext = new SparkContext("local[*]", "HourKeyRate")
    val inputPath: String = "file/WeiBoData/input"
    // NOTE(review): declared but never used — presumably the job was meant to
    // saveAsTextFile(outputPath); confirm intent before wiring it up.
    val outputPath: String = "file/WeiBoData/output/everyMonthKeyWord"

    try {
      val dataRdd: RDD[(String, Int)] = sc.textFile(inputPath)
        // Length guard: substring(77, 79) would throw on short/malformed lines.
        .filter(line => line.contains("#") && line.length >= HourEnd)
        .map(line => (line.substring(HourStart, HourEnd), 1))
        .reduceByKey(_ + _)
        // Sort directly into a single partition. The original
        // sortBy(...).repartition(1) shuffled AFTER sorting, which destroys
        // the ordering repartition's round-robin shuffle does not preserve.
        .sortBy(_._1, ascending = true, numPartitions = 1)

      // collect() brings results to the driver; a bare rdd.foreach(println)
      // would print on executors and show nothing on a real cluster.
      dataRdd.collect().foreach(println)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
