package com.scala.business

import com.google.gson.Gson
import com.scala.pojo.AllBasicInfoScala
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Spark batch job: reads JSON-encoded member records from HDFS, groups them by
 * frequent-flyer tier (`ffpTier`), and prints per-tier aggregates — member count,
 * mean age, and the ratio of total elite points (epSum) to total base points (bpSum).
 *
 * Runs with a local master and a hard-coded HDFS input path; records missing any
 * of the four required fields are dropped before aggregation.
 */
object Take02Core {
  // Gson instances are thread-safe and reusable, so a single shared parser is fine.
  val gson: Gson = new Gson()

  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("no1 speed").setMaster("local")
    val sc: SparkContext = new SparkContext(sparkConf)
    val rdd: RDD[String] = sc.textFile("hdfs://master:9000/air_data/*")

    val res = rdd.map(line => gson.fromJson(line, classOf[AllBasicInfoScala]))
      // Keep only records where every field used below is present.
      .filter(x => x.ffpTier != null && x.epSum != null && x.bpSum != null && x.age != null)
      // Per-record accumulator: (age, elite points, base points, count of 1).
      // No placeholder slots — derived stats are computed after the reduce.
      .map(x => (x.ffpTier, (x.age.toDouble, x.epSum.toDouble, x.bpSum.toDouble, 1L)))
      .reduceByKey { case ((age1, ep1, bp1, n1), (age2, ep2, bp2, n2)) =>
        (age1 + age2, // total age
         ep1 + ep2,   // total elite points
         bp1 + bp2,   // total base points
         n1 + n2)     // total member count
      }
      .map { case (tier, (ageSum, epSum, bpSum, count)) =>
        val meanAge = (ageSum / count).toInt // mean age, truncated toward zero
        // Ratio of total elite points to total base points, rounded to 3 decimals.
        // ("%.3f".format replaces the deprecated .formatted("%.3f").)
        val meanRatio = "%.3f".format(epSum / bpSum).toDouble
        (tier, (ageSum, epSum, bpSum, count, meanAge, meanRatio))
      }

    // Collect to the driver for inspection (tier count is tiny, so this is safe).
    // s-interpolation avoids printf's %d, which would throw
    // IllegalFormatConversionException if ffpTier is not a numeric type.
    res.collect().foreach { case (tier, (_, _, _, count, meanAge, meanRatio)) =>
      println(s"会员等级为：$tier 的积分精英积分和基础积分的比值为：$meanRatio,总人数为：$count,平均年龄为：$meanAge")
    }
    sc.stop()
  }
}
