package weibo

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

/**
 * Computes the top-10 users by average interaction value.
 *
 * Each input line is tab-separated; field 0 is the user id and fields
 * 3, 4, 5 are numeric interaction counts (presumably likes/comments/shares
 * — TODO confirm against the data dictionary). For every user we sum
 * fields 3+4+5 across all lines, divide by the number of lines for that
 * user, and keep the 10 highest averages.
 */
object ValueTop {
  def main(args: Array[String]): Unit = {
    val sc: SparkContext = new SparkContext("local[*]", "ValueTop")
    val inputPath: String = "file/WeiBoData/input"
    val outputPath: String = "file/WeiBoData/output/ValueTop"

    // Single pass over the input: split each line once and emit
    // (userId, (value, 1)) so one reduceByKey yields both the total
    // value and the record count (the original read/parsed the file twice).
    val avgRdd: RDD[(String, Double)] = sc.textFile(inputPath)
      .map(line => {
        val fields = line.split("\t")
        (fields(0), (fields(3).toDouble + fields(4).toDouble + fields(5).toDouble, 1))
      })
      .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))
      .map { case (user, (total, cnt)) => (user, total / cnt) }

    // Descending sort: "Top" means the 10 HIGHEST averages
    // (the original ascending sort returned the bottom 10).
    val res: Array[(String, Double)] = avgRdd
      .sortBy(_._2, ascending = false)
      .take(10)

    res.foreach(println)

    // Persist the top-10 to the output path (previously declared but unused).
    // NOTE(review): saveAsTextFile fails if the directory already exists —
    // clear the output dir between runs, as is conventional for Hadoop-style jobs.
    sc.makeRDD(res, 1).saveAsTextFile(outputPath)

    sc.stop()
  }
}
