package cn.lecosa.spark
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions
import org.apache.spark.HashPartitioner


object Temperature {

  /** Reads fixed-width weather records and prints the maximum temperature
    * observed for each year.
    *
    * Record layout (assumed from the substring offsets — TODO confirm against
    * the data file): columns 15-19 hold the year, columns 45-50 the temperature.
    * Output lines are `(maxTemperature, year)` tuples.
    */
  def main(args: Array[String]): Unit = {
    // App name shows up in the Spark monitoring UI; local[2] runs the job
    // in-process with two worker threads, so no cluster install is needed.
    val conf = new SparkConf()
      .setAppName("wow,My First Spark App!")
      .setMaster("local[2]")

    val sc = new SparkContext(conf)
    try {
      // Cluster variant kept for reference:
      // val lines = sc.textFile("hdfs://cdh2:8020/testData/average/average.txt", 1)
      val lines = sc.textFile("F:/spark/workspace/SparkDemo01/temperature", 3)

      // Drop blank lines, then project each record to a (year, temperature) pair.
      val yearTemp = lines
        .filter(_.trim.nonEmpty)
        .map(line => (line.substring(15, 19).toInt, line.substring(45, 50).toInt))

      // reduceByKey combines values map-side before the shuffle, unlike the
      // groupByKey-then-loop approach, which ships every record across the
      // network. math.max over a non-empty group yields the same result as
      // the original Integer.MIN_VALUE scan.
      val maxByYear = yearTemp
        .reduceByKey(math.max)
        .map { case (year, max) => (max, year) }

      // collect() brings results to the driver so println reaches this JVM's
      // stdout; foreach(println) on the RDD would print inside executors,
      // which only coincidentally works in local mode.
      maxByYear.collect().foreach(println)
    } finally {
      // Always release the SparkContext, even if the job throws.
      sc.stop()
    }
  }
}

//http://blog.csdn.net/kwu_ganymede/article/details/50484025

/*val six = sc.textFile("/tmp/spark/six")  
var idx = 0;  
val res = six.filter(x => (x.trim().length>0) && (x.split(",").length==4)).map(_.split(",")(2)).map(x => (x.toInt,"")).sortByKey(false).map(x=>x._1).take(5)  
.foreach(x => {  
idx = idx+1  
println(idx +"\t"+x)})  */