package Demo1

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by lenovo on 2017/9/22.
  */
object test5 {

  /**
    * Entry point: reads a tab-separated Sogou search-log file and
    *   1. counts records whose result rank and click order are both 1
    *      (user clicked the top-ranked result first), printing the count
    *      and the RDD lineage;
    *   2. computes per-key frequencies of field 1 (presumably the user id —
    *      TODO confirm against the log schema), sorted descending, and
    *      saves them as text to D:/sougouResult2.
    *
    * Expected record layout (6 tab-separated fields): the code only relies
    * on field 3 and field 4 being integers and field 1 being the group key.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    // Local Hadoop binaries needed for winutils on Windows.
    System.setProperty("hadoop.home.dir","E://hadoop-liyadong//hadoop-2.7.1")

    val conf = new SparkConf().setMaster("local[2]").setAppName("test5").set("spark.testing.memory","2147480000")
    val sc = new SparkContext(conf)

    try {
      val lines = sc.textFile("C:\\Users\\lenovo\\Desktop\\sougou.txt")
      // Keep only well-formed records (exactly 6 tab-separated fields);
      // cached because it feeds two independent downstream jobs.
      val records = lines.map(_.split("\t")).filter(_.length == 6).cache()

      // Records where the clicked result was ranked 1 AND was the first click.
      // Single filter pass instead of two chained filters.
      val topRankFirstClick = records.filter(r => r(3).toInt == 1 && r(4).toInt == 1)
      println(topRankFirstClick.count() + "*")
      println(topRankFirstClick.toDebugString)

      // Frequency of field 1, descending. sortBy(_._2, ascending = false)
      // replaces the original swap / sortByKey(false) / swap-back dance
      // while producing the same (key, count) ordering.
      val keyCounts = records
        .map(r => (r(1), 1))
        .reduceByKey(_ + _)
        .sortBy(_._2, ascending = false)
      keyCounts.saveAsTextFile("D:/sougouResult2")
    } finally {
      // Always release the context, even if a job above fails.
      sc.stop()
    }
  }
}
