package com.example.bigdata.spark.SparkTest

import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}

object RDD高级操作 {

  /**
   * Entry point: builds a single-core local SparkContext, silences Spark's
   * own logging, and runs the currently selected demo method.
   * Uncomment exactly one demo call to switch examples.
   */
  def main(args: Array[String]): Unit = {
    // Use this object's own name for the Spark application name.
    // (The original referenced an unrelated ChapterDemo object.)
    val conf = new SparkConf().setMaster("local[1]").setAppName(getClass.getSimpleName)
    Logger.getLogger("org.apache.spark").setLevel(Level.OFF) // Level.OFF / Level.INFO
    val sc = new SparkContext(conf)
    //缓存RDD(sc)
    //wordCount(sc)
    //累加器(sc)
    累加器_集合(sc)
    //test1(sc)
    sc.stop()
  }

  /**
   * Demonstrates RDD caching: `rddData1` is cached so the later actions
   * (count / take) reuse the materialized partitions instead of re-reading
   * and re-mapping the input file.
   */
  def 缓存RDD(sc: SparkContext): Unit = {
    val sogouRDD = sc.textFile("input/SogouQ_sample.txt")
    // (key, 1) pairs keyed on the second tab-separated field of each line.
    val rddData1 = sogouRDD.map(t => (t.split("\t")(1), 1))
    rddData1.cache() // materialized by the first action below, reused afterwards
    val rddData2 = rddData1.reduceByKey(_ + _)
    println(rddData2.collect.mkString(":"))
    val rddData3 = rddData2.sortBy(_._2, false)
    println(rddData3.take(5).mkString)
    // Deliberate pauses so the cached RDD can be inspected in the Spark Web UI.
    Thread.sleep(60000)
    println(rddData1.count())
    Thread.sleep(60000)
    println(rddData1.take(5).mkString)
  }

  /**
   * Classic word count over a comma-separated input file, with the
   * reduce step spread over 2 partitions and results sorted by
   * descending frequency.
   */
  def wordCount(sc: SparkContext): Unit = {
    val lines = sc.textFile("input/words6.3.3.txt")
    val words = lines.flatMap(_.split(","))
    val pairs = words.map((_, 1))
    val counts = pairs.reduceByKey(_ + _, 2)
    val sorted = counts.sortBy(_._2, false)
    println(sorted.collect.mkString)
  }

  /**
   * Counts adult visitors (age >= 18) two ways and prints both results:
   *   1. a long accumulator updated inside a `foreach` action;
   *   2. a plain `filter(...).count()`.
   * Both figures should agree.
   */
  def 累加器(sc: SparkContext): Unit = {
    val visitorRDD = sc.parallelize(
      Array(("Bob", 15), ("Thomas", 28), ("Tom", 18), ("Galen", 35),
            ("Catalina", 12), ("Karen", 9), ("Boris", 20)), 3)
    val visitorAccumulator = sc.longAccumulator("统计成年游客人数")
    // Approach 1: side-effecting action that bumps the driver-side accumulator.
    visitorRDD.foreach { visitor =>
      if (visitor._2 >= 18) {
        visitorAccumulator.add(1)
        println(visitor.toString() + "  ---> ")
      }
    }
    println(visitorAccumulator)
    // Approach 2: pure transformation + action, no accumulator needed.
    val count = visitorRDD.filter(_._2 >= 18).count()
    println("count : " + count)
  }

  /** A user with a display name and a phone number. */
  case class User(name: String, tel: String)

  /**
   * Collects every user whose phone number ends in three identical digits,
   * first via a collection accumulator, then (cross-check) via
   * `filter(...).count()`.
   */
  def 累加器_集合(sc: SparkContext): Unit = {
    val userArray = Array(
      User("Alice", "15839493345"),
      User("Bob", "15839493666"),
      User("Thomas", "15839493345"),
      User("Tom", "15839493777"),
      User("Boris", "15839493998"))
    val userRDD = sc.parallelize(userArray, 2)

    // True when the phone number's last three digits are all the same.
    def endsWithTripleDigit(tel: String): Boolean = {
      val reversed = tel.reverse
      reversed(0) == reversed(1) && reversed(1) == reversed(2)
    }

    // Approach 1: gather matching users into a driver-side collection accumulator.
    val userAccumulator = sc.collectionAccumulator[User]("用户累加器")
    userRDD.foreach { user =>
      if (endsWithTripleDigit(user.tel)) userAccumulator.add(user)
    }
    println(userAccumulator)

    // Approach 2: same predicate as a transformation, counted as an action.
    println(userRDD.filter(user => endsWithTripleDigit(user.tel)).count())
  }

  // Statistics over HDFS file peopleinfo.txt (one "id,sex,height" record per
  // line): total count, max height and min height, per sex.

  /** One record of peopleinfo.txt; sex is "M" (male) or "F" (female). */
  case class People(id: String, sex: String, height: Int)

  /**
   * Prints, for males and for females separately: the record count, every
   * record, and the maximum/minimum height.
   *
   * Bug fix vs. the original: the "F"-filtered RDD was labeled 男性 (male)
   * and the "M"-filtered RDD 女性 (female); the pairing is now corrected so
   * "M" rows carry the male labels and "F" rows the female labels.
   */
  def test1(sc: SparkContext): Unit = {
    val lines = sc.textFile("input/peopleinfo.txt")
    // Parse each "id,sex,height" line into a People record.
    val people = lines.map(_.split(","))
      .map(p => People(p(0), p(1), p(2).toInt))
    people.cache() // reused by both filters below

    val maleRDD = people.filter(_.sex == "M")   // males
    val femaleRDD = people.filter(_.sex == "F") // females

    println("男性总数 : " + maleRDD.count())
    maleRDD.foreach(println(_))
    // max()/min() avoid the four full sortBy passes the original used.
    println("男性最高身高 : " + maleRDD.map(_.height).max())
    println("男性最低身高 : " + maleRDD.map(_.height).min())

    println("\n女性总数 : " + femaleRDD.count())
    femaleRDD.foreach(println(_))
    println("女性最高身高 : " + femaleRDD.map(_.height).max())
    println("女性最低身高 : " + femaleRDD.map(_.height).min())
  }
}
