package weibo

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object EveryMonthKeyWord {

  // Shortest `#...#` span in a post — the hash-tag topic.
  private val topicRegex = """#.*?#""".r

  /**
   * Spark batch job: for each month 02..07, count hash-tag topics in
   * tab-separated weibo records and keep the top 3 per month.
   *
   * Record layout (assumed from the code): column index 2 holds a date
   * string whose characters at positions 5-6 are the zero-padded month
   * (e.g. "2020-02-14") — TODO confirm against the input data.
   *
   * @param args args(0) = input path, args(1) = output path
   */
  def main(args: Array[String]): Unit = {
    require(args.length >= 2, "usage: EveryMonthKeyWord <inputPath> <outputPath>")

    val conf: SparkConf = new SparkConf().setAppName("EveryMonthKeyWord")
    val sc: SparkContext = new SparkContext(conf)
    try {
      // cache(): the RDD is scanned once per month below; without caching
      // each monthly pass would re-read the input from storage.
      val dataRdd: RDD[String] = sc.textFile(args(0))
        .repartition(2)
        .cache()

      // Same month range and top-3 cutoff as the original six copies.
      val months = Seq("02", "03", "04", "05", "06", "07")
      val res: Seq[((String, String), Int)] =
        months.flatMap(month => top3ForMonth(dataRdd, month))

      sc.parallelize(res).saveAsTextFile(args(1))
    } finally {
      sc.stop() // release the cluster resources even on failure
    }
  }

  /**
   * Top 3 ((month, topic), count) pairs for one zero-padded month.
   *
   * Keeps lines that contain a '#' and whose date column matches `month`;
   * the topic is the first `#...#` match, falling back to the whole line
   * when the regex finds nothing (e.g. a lone '#').
   */
  private def top3ForMonth(dataRdd: RDD[String], month: String): Seq[((String, String), Int)] =
    dataRdd
      .filter { line =>
        // substring(5, 7) is exactly two chars, so equality replaces the
        // original's misleading `.contains(month)` with identical behavior.
        line.contains("#") && line.split("\t")(2).substring(5, 7) == month
      }
      .map(line => ((month, topicRegex.findFirstIn(line).getOrElse(line)), 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
      .take(3)
      .toSeq
}
