package weibo

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

//00-07 08-11 12-13 14-18 19-23
/**
 * Counts Weibo records per time-of-day bucket
 * (00-07, 08-11, 12-13, 14-18, 19-23).
 *
 * Each input line is a fixed-width record; the two characters at
 * columns 77-78 hold the two-digit hour of the record's timestamp.
 */
object SpeTimeUserAct {

  /** Maps a two-digit hour string ("00".."23") to its time-of-day bucket. */
  private def hourBucket(hh: String): String = hh match {
    case "00" | "01" | "02" | "03" | "04" | "05" | "06" | "07" => "00-07"
    case "08" | "09" | "10" | "11"                             => "08-11"
    case "12" | "13"                                           => "12-13"
    case "14" | "15" | "16" | "17" | "18"                      => "14-18"
    case _                                                     => "19-23"
  }

  def main(args: Array[String]): Unit = {
    val sc: SparkContext = new SparkContext("local[*]", "SpeTimeUserAct")
    val inputPath: String = "file/WeiBoData/input"
    // NOTE(review): outputPath is never used below (results only go to stdout),
    // and the path segment "everyMonthKeyWord" does not match this job's purpose.
    // Confirm whether a dataRdd.saveAsTextFile(outputPath) was intended.
    val outputPath: String = "file/WeiBoData/output/everyMonthKeyWord"

    val dataRdd: RDD[(String, Int)] = sc.textFile(inputPath)
      // Hour lives at fixed columns 77-78 of each record.
      .map(line => (hourBucket(line.substring(77, 79)), 1))
      .reduceByKey(_ + _)
      // BUG FIX: the original called .repartition(2) AFTER .sortBy, which
      // performs a second shuffle and destroys the sort order. Asking sortBy
      // for 2 partitions keeps both the ordering and the partition count.
      .sortBy(_._1, numPartitions = 2)

    dataRdd.foreach(println)

    sc.stop()
  }
}
