package OutOfLine

import com.alibaba.fastjson.{JSON, JSONArray}
import org.apache.commons.lang.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import scala.collection.immutable.TreeSet
import scala.collection.mutable

object SailTop3 {
  /**
   * Reads an e-commerce payment log (one JSON object per line), keeps only
   * successful payments (pay_status == 1), sums the per-goods money for each
   * (province, city) pair, and prints the top-3 cities by revenue within
   * each province.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName(s"${this.getClass.getName}")
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext
    val logRDD: RDD[String] = sc.textFile("D:\\asa\\dianshang\\pay - 副本.log")

    // Parse each line into (payStatus, province, city, perGoodsMoney) and
    // keep only successful payments.
    val successful = logRDD.map { line =>
      val record = JSON.parseObject(line)
      // pay_status: 1 = success, 0 = failure; a missing/empty field counts as failure.
      val rawStatus = record.getString("pay_status")
      val payStatus = if (StringUtils.isEmpty(rawStatus)) "0" else rawStatus
      val province = record.getString("province")
      val city = record.getString("city")

      // Money of every item in the "goods" array; an item with a
      // missing/empty money field contributes 0.0.
      val goodsMoney: List[Double] =
        if (line.contains("goods")) {
          val goods: JSONArray = record.getJSONArray("goods")
          (0 until goods.size()).toList.map { i =>
            val money = goods.getJSONObject(i).getString("money")
            if (StringUtils.isEmpty(money)) 0.0 else money.toDouble
          }
        } else {
          Nil
        }

      (payStatus.toInt, province, city, goodsMoney)
    }.filter(_._1 == 1)

    // Total revenue per (province, city).
    val revenueByCity = successful
      .map(x => ((x._2, x._3), x._4.sum))
      .reduceByKey(_ + _)

    // Top-3 cities per province. The ordering sorts by descending revenue and
    // breaks ties on the key so two cities with equal revenue are NOT treated
    // as duplicates by the TreeSet (an ordering on revenue alone would
    // silently collapse ties and drop one of the cities).
    revenueByCity.groupBy(_._1._1).mapValues { cities =>
      val top = mutable.TreeSet.empty[((String, String), Double)](
        Ordering.by(t => (-t._2, t._1)))
      cities.foreach { entry =>
        top.add(entry)
        if (top.size > 3) {
          top.remove(top.last) // evict the entry with the smallest revenue
        }
      }
      top
    }.foreach(println)
    // Sample output:
    // (山西省,TreeSet(((山西省,长治市),17000.0)))
    // (北京市,TreeSet(((北京市,北京市),5000.0)))
    // (西藏自治区,TreeSet(((西藏自治区,那曲地区),5000.0)))
    // (河南省,TreeSet(((河南省,洛阳市),60000.0)))

    spark.stop()
  }
}
