package cn.oldsix.spark.core.weather

import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.{FileSplit, InputSplit, TextInputFormat}
import org.apache.spark.rdd.HadoopRDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * @ Author : Wu.D.J
  * @ Create : 2017.07.25
  */
/**
  * Computes the average temperature per weather station.
  *
  * Input layout (fixed-width text records under `input`):
  *   - columns 14..18 hold the temperature reading; the sentinel 9999
  *     marks a missing reading and is filtered out.
  *   - the station key is recovered from the source file's path
  *     (characters [length-9, length-4) of the tagged line).
  *
  * Results (station, averageTemperature) are printed to stdout.
  */
object WeatherAverage {

    def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setAppName("weather-average")
        val sc = new SparkContext(conf)
        val input = "/data/spark-example/weather/"
        // Go through HadoopRDD directly so each record can be tagged with
        // the path of the file split it came from (the station id lives in
        // the file name, not in the record itself).
        val fileRDD = sc.hadoopFile[LongWritable, Text, TextInputFormat](input)
        val hadoopRDD = fileRDD.asInstanceOf[HadoopRDD[LongWritable, Text]]
        val lines = hadoopRDD.mapPartitionsWithInputSplit((inputSplit: InputSplit, iterator: Iterator[(LongWritable, Text)]) => {
            val file = inputSplit.asInstanceOf[FileSplit]
            iterator.map(x => x._2 + " " + file.getPath.toString)
        })
        // Parse each line exactly once into (stationKey, temperature), then
        // drop missing readings. (Previously the temperature substring was
        // parsed twice: once in filter and again in map.)
        val stationTemps = lines
            .map(line => (
                line.substring(line.length - 9, line.length - 4),
                line.substring(14, 19).trim().toInt))
            .filter { case (_, temp) => temp != 9999 }
        // Accumulate (count, sum) per station with a single shuffle, then
        // divide as Double so the mean is not truncated by integer division
        // (the original `temp / num` on Ints silently dropped the fraction).
        stationTemps.combineByKey(
            (temp: Int) => (1, temp),
            (acc: (Int, Int), temp: Int) => (acc._1 + 1, acc._2 + temp),
            (a: (Int, Int), b: (Int, Int)) => (a._1 + b._1, a._2 + b._2)
        ).map {
            case (name, (count, sum)) => (name, sum.toDouble / count)
        }.collect().foreach(println)
        sc.stop()
    }
}
