import org.apache.spark.{SparkContext,SparkConf}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}


/** One parsed record-linkage row: the two record ids, nine comparison
  * scores (NaN where the source field was "?"), and the ground-truth
  * match flag from the last CSV column.
  *
  * Defined at the TOP level, not nested inside Group: a case class nested
  * in a class captures an `$outer` reference to the enclosing instance.
  * Group holds a SparkContext (not serializable), so shuffling nested
  * MatchData instances in the groupBy below would fail every task with
  * "Task not serializable".
  */
case class MatchData(id1: Int, id2: Int, scores: Array[Double], matched: Boolean)

/** Stateless CSV-parsing helpers for the linkage data set.
  *
  * Kept in a serializable standalone object so that the closures passed to
  * `filter`/`map` capture this object rather than the Group instance
  * (which holds the non-serializable SparkContext).
  */
object LinkageParsing extends Serializable {

  /** A line is the CSV header iff it contains the first column name "id_1". */
  def isHeader(line: String): Boolean = line.contains("id_1")

  /** Parse one score field; the data set writes "?" for missing values. */
  def toDouble(s: String): Double = if ("?".equals(s)) Double.NaN else s.toDouble

  /** Parse one data line: id1, id2, nine scores (columns 2..10), match flag (column 11).
    *
    * NOTE(review): assumes well-formed rows with at least 12 comma-separated
    * fields; a malformed row will throw NumberFormatException /
    * ArrayIndexOutOfBoundsException, matching the original behavior.
    */
  def parse(line: String): MatchData = {
    val pieces = line.split(',')
    val id1 = pieces(0).toInt
    val id2 = pieces(1).toInt
    val scores = pieces.slice(2, 11).map(toDouble) // columns 2..10 inclusive
    val matched = pieces(11).toBoolean
    MatchData(id1, id2, scores, matched)
  }
}

/** Counts matched vs. unmatched record pairs in the linkage data set.
  *
  * Constructing an instance runs the whole job as a side effect: it builds
  * a local-mode SparkContext, reads two CSV blocks from HDFS, merges them,
  * strips the header line, parses every row, groups the records by their
  * `matched` flag, prints the size of each group, and stops the context.
  *
  * @param name currently unused; kept for source compatibility with callers.
  */
case class Group(name: String) {
  // Local-mode configuration; the app name appears in the Spark UI.
  val conf = new SparkConf().setAppName("WordCount").setMaster("local")
  val sc = new SparkContext(conf)

  // Read both CSV blocks from HDFS and concatenate them into a single RDD.
  val rawblock1 = sc.textFile("hdfs://hadoopmaster:9000/stockdata/block_1.csv")
  val rawblock2 = sc.textFile("hdfs://hadoopmaster:9000/stockdata/block_2.csv")
  val raws = rawblock1.union(rawblock2)

  // Backward-compatible delegates: earlier versions exposed these as
  // instance methods. They forward to LinkageParsing; the Spark closures
  // below deliberately call the object directly so they never capture
  // `this` (and with it the non-serializable SparkContext).
  def isHeader(line: String) = LinkageParsing.isHeader(line)
  def toDouble(s: String) = LinkageParsing.toDouble(s)
  def parse(line: String) = LinkageParsing.parse(line)

  // RDDs are immutable: filtering yields a new RDD without the header row.
  val noheader = raws.filter(line => !LinkageParsing.isHeader(line))
  val parsed = noheader.map(LinkageParsing.parse)

  // Group records by the ground-truth match flag and print each group's size.
  val grouped = parsed.groupBy(_.matched)
  grouped.mapValues(_.size).foreach(println)

  sc.stop()
}
