import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}


object demo4 {

  /**
   * Spark demo driver. Reads hdfs:///test/test and:
   *   1. (lazily defined, never executed — see note on `r`) keeps, per key, the
   *      CSV row with the numerically largest first column;
   *   2. runs a word count over space-separated tokens and writes "word,count"
   *      lines to hdfs:///test/res, deleting any previous output first;
   *   3. sets up (but never iterates) a small PageRank link/rank dataset.
   */
  def main(args: Array[String]): Unit = {
    val filePath  = "hdfs:///test/res"
    val sparkConf = new SparkConf().setAppName("workCount")
    val sc        = new SparkContext(sparkConf)
    try {
      // Use Spark's Hadoop configuration so the FileSystem sees the same
      // HDFS settings as the cluster (a bare `new Configuration()` only
      // picks up whatever happens to be on the driver classpath).
      val hdfs    = FileSystem.get(sc.hadoopConfiguration)
      val outPath = new Path(filePath)
      // saveAsTextFile fails if the output directory exists; remove it up front.
      if (hdfs.exists(outPath)) hdfs.delete(outPath, true)

      val f = sc.textFile("hdfs:///test/test")

      // Per key (columns 1 and 2 concatenated), keep the CSV row whose
      // column 0 is numerically larger.
      // NOTE(review): `r` is never used by an action, so this lineage is
      // never actually computed — dead code unless an action is added.
      val r = f.map(_.split(","))
        .map(cols => (cols(1) + cols(2), cols))
        .reduceByKey((a, b) => if (a(0).toInt > b(0).toInt) a else b)

      // Word count: tokenize on spaces, count, emit "word,count" lines.
      f.flatMap(_.split(" "))
        .map((_, 1))
        .reduceByKey(_ + _)
        .map { case (word, count) => word + "," + count }
        .saveAsTextFile(filePath)

      // PageRank scaffolding: adjacency lists and initial ranks.
      // NOTE(review): the iteration step was never written, so `links` and
      // `ranks` are unused — complete or remove.
      val links = sc.parallelize(Array(('A', Array('D')), ('B', Array('A')),
        ('C', Array('A', 'B')), ('D', Array('A', 'C'))), 2).cache()

      val ranks = sc.parallelize(Array(('A', 1.0), ('B', 1.0), ('C', 1.0), ('D', 1.0)), 2)
    } finally {
      // Always release the driver's cluster resources, even on failure.
      sc.stop()
    }
  }
}