package com.jianghang.class_three.log_format.APP

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.CellUtil
import org.apache.hadoop.hbase.client.{Result, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

/**
  *
  * Statistical analysis of HBase data using Spark.
  *
  * 1) Count visits per country and province
  * 2) Count usage per browser
  */
object AnalysisApp {

  // Column family in the HBase table that holds the parsed log fields.
  private val Family = "log_format"

  /**
    * Entry point: reads one day of parsed nginx logs from HBase and computes
    *   1) visit counts per (country, province) — top 10
    *   2) visit counts per browser
    * each implemented three ways for comparison: RDD API, DataFrame API and
    * Spark SQL.
    *
    * @param args optional; args(0) is the day to analyse in yyyyMMdd format.
    *             Defaults to "20190130". (The previous hard-coded value
    *             "20190230" was not a valid calendar date — February has at
    *             most 29 days.)
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .appName("AnalysisApp")
      .master("local[2]")
      .getOrCreate()

    spark.sparkContext.setLogLevel("ERROR")

    // Day to analyse; overridable from the command line.
    val day = args.headOption.getOrElse("20190130")

    // HBase connection settings.
    val conf = new Configuration()
    conf.set("hbase.rootdir", "hdfs://hadoop000:8020/hbase")
    conf.set("hbase.zookeeper.quorum", "hadoop000:2181")

    // INPUT_TABLE / OUTPUT_TABLE are named relative to the Map/Reduce job.
    val tableName = "nginx_log_" + day
    conf.set(TableInputFormat.INPUT_TABLE, tableName) // table to read from

    // Restrict the scan to the family/columns the analyses actually need.
    // Bytes.toBytes is charset-explicit (UTF-8), unlike String#getBytes.
    val family = Bytes.toBytes(Family)
    val scan = new Scan()
    scan.addFamily(family)
    scan.addColumn(family, Bytes.toBytes("country"))
    scan.addColumn(family, Bytes.toBytes("province"))
    scan.addColumn(family, Bytes.toBytes("browser_name"))

    // TableInputFormat expects the Scan serialized as a Base64 string.
    conf.set(TableInputFormat.SCAN, Base64.encodeBytes(ProtobufUtil.toScan(scan).toByteArray))

    // Read the table through Spark's newAPIHadoopRDD.
    val hbaseRDD: RDD[(ImmutableBytesWritable, Result)] = spark.sparkContext.newAPIHadoopRDD(conf,
      classOf[TableInputFormat],
      classOf[ImmutableBytesWritable],
      classOf[Result]
    )

    // Cache BEFORE the first action so every job below (including the debug
    // dumps) reuses the in-memory copy instead of re-scanning HBase. The
    // original cached after the first two take() actions, wasting two scans.
    hbaseRDD.cache()

    println("********************************hbaseRDD**************************************")
    hbaseRDD.take(10).foreach(println)

    // Debug dump: row key plus every cell of the first 50 rows.
    hbaseRDD.take(50).foreach { case (key, result) =>
      val rowKey = Bytes.toString(key.get())
      for (cell <- result.rawCells()) {
        val row = Bytes.toString(CellUtil.cloneRow(cell))
        val cf = Bytes.toString(CellUtil.cloneFamily(cell))
        val qualifier = Bytes.toString(CellUtil.cloneQualifier(cell))
        val value = Bytes.toString(CellUtil.cloneValue(cell))
        println(s"$rowKey :$row: $cf : $qualifier : $value")
      }
    }

    println("统计每个国家每个省份的访问量  ==> TOP10")
    // RDD API: ((country, province), 1) -> reduceByKey -> sort by count desc.
    hbaseRDD.map { case (_, result) =>
      val country = Bytes.toString(result.getValue(family, Bytes.toBytes("country")))
      val province = Bytes.toString(result.getValue(family, Bytes.toBytes("province")))
      ((country, province), 1)
    }.reduceByKey(_ + _)
      .map(_.swap) // ((country,province), n) => (n, (country,province)) so sortByKey orders by count
      .sortByKey(ascending = false)
      .map(_.swap) // restore ((country,province), n)
      .take(10).foreach(println)

    // Same aggregation with the DataFrame API.
    import spark.implicits._
    hbaseRDD.map { case (_, result) =>
      CountryProvince(
        Bytes.toString(result.getValue(family, Bytes.toBytes("country"))),
        Bytes.toString(result.getValue(family, Bytes.toBytes("province"))))
    }.toDF()
      .groupBy("country", "province")
      .count()
      .sort($"count".desc)
      .show(10, false)

    println("统计不同浏览器的访问量")
    // RDD API: (browser, 1) -> reduceByKey -> sort by count desc.
    hbaseRDD.map { case (_, result) =>
      (Bytes.toString(result.getValue(family, Bytes.toBytes("browser_name"))), 1)
    }.reduceByKey(_ + _)
      .map(_.swap)
      .sortByKey(ascending = false)
      .map(_.swap)
      .take(10).foreach(println)

    // DataFrame API version. Built once and reused below for the SQL variant
    // (the original rebuilt the same RDD->DF pipeline a second time).
    val browserDF = hbaseRDD.map { case (_, result) =>
      Browser(Bytes.toString(result.getValue(family, Bytes.toBytes("browser_name"))))
    }.toDF()

    browserDF
      .groupBy("browser_name")
      .count()
      .sort($"count".desc)
      .show(false)

    // Spark SQL version over the same DataFrame.
    browserDF.createOrReplaceTempView("tmp")
    spark.sql("select browser_name,count(1) cnt from tmp group by browser_name order by cnt desc").show(false)

    // Release the cached RDD (blocking) and shut down.
    hbaseRDD.unpersist(true)

    spark.stop()
  }

  // Row shape for the (country, province) DataFrame aggregation.
  final case class CountryProvince(country: String, province: String)

  // Row shape for the browser DataFrame aggregation.
  final case class Browser(browser_name: String)

}
