package weibo

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

object FindDifferUser {

  /** Hour strings that mark "special" activity windows (midday 12-14 and
    * evening 19-23). Kept as substrings because the original logic matched
    * with `contains`, not equality — preserved for backward compatibility. */
  private val SpecialHours: Seq[String] =
    Seq("12", "13", "14", "19", "20", "21", "22", "23")

  /** Ratio above which a user is classified as a "student"-pattern user. */
  private val StudentRatioThreshold: Double = 0.6

  /** Returns true when the record's time field (second tuple element)
    * contains any of the special hour substrings.
    *
    * @param Time (userKey, timeField) pair; only the time field is inspected
    */
  def IsSpeTime(Time: (String, String)): Boolean =
    SpecialHours.exists(Time._2.contains)

  def main(args: Array[String]): Unit = {
    val sc: SparkContext = new SparkContext("local[*]", "FindDifferUser")
    val inputPath: String = "file/WeiBoData/input"
    // NOTE(review): outputPath is declared but never used — results are only
    // printed. Kept in case a saveAsTextFile step is planned; confirm intent.
    val outputPath: String = "file/WeiBoData/output/everyMonthKeyWord"

    // Each line: chars [0,31) identify the user/record, chars [76,79) hold
    // the time field. Assumes fixed-width input lines — TODO confirm format.
    val dataRdd: RDD[(String, String)] = sc.textFile(inputPath)
      .map(line => (line.substring(0, 31), line.substring(76, 79)))
      .repartition(4)

    // Records falling inside the special hour windows.
    val specialRdd: RDD[(String, String)] = dataRdd.filter(IsSpeTime)

    // Per-user counts: posts in special hours vs. all posts.
    val specialCounts: RDD[(String, Int)] = specialRdd.map(_._1 -> 1).reduceByKey(_ + _)
    val totalCounts: RDD[(String, Int)] = dataRdd.map(_._1 -> 1).reduceByKey(_ + _)

    // Join once and cache: the ratio RDD feeds two separate count() actions,
    // and without caching the shuffle/join would be recomputed for each.
    val ratios: RDD[(String, Double)] = specialCounts.join(totalCounts)
      .map { case (user, (special, total)) => (user, special.toDouble / total) }
      .cache()

    // Users posting mostly in special hours ("students") vs. the rest.
    val resStu = ratios.filter(_._2 > StudentRatioThreshold).count()
    val resWork = ratios.filter(_._2 <= StudentRatioThreshold).count()
    println(resStu, resWork)
    sc.stop()
  }
}
