package com.jianghang.class_three.log_format.APP

import java.net.URI
import java.util.Locale
import java.util.zip.CRC32

import org.apache.commons.lang3.StringUtils
import org.apache.commons.lang3.time.FastDateFormat
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles, TableOutputFormat}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, KeyValue, TableName}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.udf

import scala.collection.mutable.ListBuffer

/**
  * ip country province city time method url protocal http_status bytes_sent referer
  * user_agent browser_name browser_version engine_name engine_version os_name platform_name is_mobile
  *
  * 把数据直接写入HFile，再把数据load进HBase表
  *
  * https://www.cnblogs.com/houji/p/7382996.html
  *
  *
  */
object _040_LogFormatToSaveHBase_improve {

  def main(args: Array[String]): Unit = {
    val session = SparkSession.builder()
      .master("local[4]")
      .appName("_030_Log")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    // Fix: honor the CLI argument when supplied (the commented-out
    // `args(0)` showed the intent); fall back to the old hard-coded day
    // so local runs keep behaving as before.
    val day = args.headOption.getOrElse("042")

    var logDF = session.read.json("data/output/LogFormatApp2/*")

    // UDF: turn "[30/Jan/2019:00:00:21 +0800]" into "201901300000".
    def formatTime() = udf(
      (time: String) => {
        val beginIndex = time.indexOf("[") + 1
        val endIndex = time.lastIndexOf("]")
        val time_str = time.substring(beginIndex, endIndex)
        val time_long = FastDateFormat.getInstance("dd/MMM/yyyy:HH:mm:ss Z", Locale.ENGLISH)
          .parse(time_str).getTime
        FastDateFormat.getInstance("yyyyMMddHHmm").format(time_long)
      }
    )

    // Add the new column "formattime" (or replace it if it already exists).
    logDF = logDF.withColumn("formattime", formatTime()(logDF("time")))

    logDF.show()

    // Build ((rowkey, column), KeyValue) pairs; HFileOutputFormat2 requires
    // the KeyValues sorted by rowkey and column, hence the tuple key.
    val hbaseInfoRDD = logDF.rdd.mapPartitions(partition => {
      partition.flatMap(x => {
        val ip = x.getAs[String]("ip")
        val country = x.getAs[String]("country")
        val province = x.getAs[String]("province")
        val city = x.getAs[String]("city")
        val formattime = x.getAs[String]("formattime")
        val method = x.getAs[String]("method")
        val url = x.getAs[String]("url")
        val protocal = x.getAs[String]("protocal")
        val http_status = x.getAs[String]("http_status")
        val bytes_sent = x.getAs[String]("bytes_sent")
        val referer = x.getAs[String]("referer")
        val browser_name = x.getAs[String]("browser_name")
        val browser_version = x.getAs[String]("browser_version")
        val os_name = x.getAs[String]("os_name")
        val user_agent = x.getAs[String]("user_agent")

        val columns = scala.collection.mutable.HashMap[String, String]()
        columns.put("ip", ip)
        columns.put("country", country)
        columns.put("province", province)
        columns.put("city", city)
        columns.put("formattime", formattime)
        columns.put("method", method)
        columns.put("url", url)
        columns.put("protocal", protocal)
        columns.put("http_status", http_status)
        columns.put("bytes_sent", bytes_sent)
        columns.put("referer", referer)
        columns.put("browser_name", browser_name)
        columns.put("browser_version", browser_version)
        columns.put("os_name", os_name)
        // Fix: user_agent was extracted (and used in the rowkey) but never
        // stored, although the header doc lists it as a column.
        columns.put("user_agent", user_agent)

        // HBase rowkey: day + CRC32 of referer/url/ip/user_agent.
        val rowkey = getRowKey(day, referer + url + ip + user_agent)
        val rk = Bytes.toBytes(rowkey)

        val list = new ListBuffer[((String, String), KeyValue)]()
        // One KeyValue per column of this row; null values become "-".
        var keyValue: KeyValue = null
        for ((k, v) <- columns) {
          if (v == null) {
            keyValue = new KeyValue(rk, "log_format".getBytes, Bytes.toBytes(k), Bytes.toBytes("-"))
          } else {
            keyValue = new KeyValue(rk, "log_format".getBytes, Bytes.toBytes(k), Bytes.toBytes(v))
          }

          list += {
            (rowkey, k) -> keyValue
          }
        }

        // Hand back an immutable list so the buffer does not escape.
        list.toList
      }
      )
    }).sortByKey() // mandatory: HFiles must be written in sorted key order
      .map(x => (new ImmutableBytesWritable(Bytes.toBytes(x._1._1)), x._2))
    // (ImmutableBytesWritable, KeyValue)

    val conf = new Configuration()
    conf.set("hbase.rootdir", "hdfs://hadoop000:8020/hbase")
    conf.set("hbase.zookeeper.quorum", "hadoop000:2181")

    // Recreate the per-day table so a re-run starts from a clean slate.
    val tableName = createTable(day, conf)

    // Tell the output format which table the HFiles are destined for.
    conf.set(TableOutputFormat.OUTPUT_TABLE, tableName)

    val job = Job.getInstance(conf)
    val table = new HTable(conf, tableName)
    HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor, table.getRegionLocator)

    val output = "hdfs://hadoop000:8020/etl/access/hbase"
    val outputPath = new Path(output)
    val outputPath1 = new Path("/etl/access/hbase")

    val HDFS_URL = "hdfs://hadoop000:8020"
    val fileSystem = FileSystem.get(new URI(HDFS_URL), conf, "hadoop")

    // The staging dir must not pre-exist, or the Hadoop job refuses to start.
    if (fileSystem.exists(outputPath1)) {
      fileSystem.delete(outputPath1, true)
      println("delete :" + outputPath1)
    }

    // Write the sorted KeyValues out as HFiles.
    hbaseInfoRDD.saveAsNewAPIHadoopFile(
      output,
      classOf[ImmutableBytesWritable],
      classOf[KeyValue],
      classOf[HFileOutputFormat2],
      job.getConfiguration
    )

    // Bulk-load the HFiles into the table, then clean up the staging dir.
    if (fileSystem.exists(outputPath1)) {
      val load = new LoadIncrementalHFiles(job.getConfiguration)
      load.doBulkLoad(outputPath, table)
      fileSystem.delete(outputPath1, true)
      println("delete :" + outputPath1)
    }

    // Fix: release the HBase table handle and the HDFS client that the
    // original left open.
    table.close()
    fileSystem.close()

    session.stop()
  }


  /**
    * Builds an HBase rowkey of the form "&lt;time&gt;_&lt;crc32(info)&gt;".
    *
    * The time prefix keeps one day's rows together; the CRC32 checksum keeps
    * the key short while spreading otherwise-similar log lines.
    *
    * @param time day prefix for the key
    * @param info payload (referer + url + ip + user_agent) to checksum
    * @return "&lt;time&gt;_&lt;checksum&gt;"; the checksum is 0 when info is null/empty
    */
  def getRowKey(time: String, info: String) = {
    import java.nio.charset.StandardCharsets

    // StringBuilder instead of repeated "+" concatenation.
    val builder = new StringBuilder(time)
    builder.append("_")

    val crc32 = new CRC32()
    crc32.reset()
    // Fix: drop the commons-lang3 (StringUtils) and HBase (Bytes) dependencies
    // from this pure helper — Bytes.toBytes encodes UTF-8, so
    // getBytes(StandardCharsets.UTF_8) is byte-identical.
    if (info != null && info.nonEmpty) {
      crc32.update(info.getBytes(StandardCharsets.UTF_8))
    }
    builder.append(crc32.getValue)

    builder.toString()
  }

  /**
    * Drops (if present) and recreates the per-day HBase table
    * "nginx_log_&lt;day&gt;" with a single "log_format" column family.
    *
    * The job is a daily batch: on a re-run any existing table is deleted
    * first so stale data from a failed run cannot survive.
    *
    * @param day  day suffix used in the table name
    * @param conf HBase client configuration
    * @return the name of the table that was (re)created
    */
  def createTable(day: String, conf: Configuration) = {
    import scala.util.control.NonFatal

    val table = "nginx_log_" + day

    var connection: Connection = null
    var admin: Admin = null
    try {
      connection = ConnectionFactory.createConnection(conf)
      admin = connection.getAdmin

      val tableName = TableName.valueOf(table)
      if (admin.tableExists(tableName)) {
        admin.disableTable(tableName)
        admin.deleteTable(tableName)
      }

      // Reuse the TableName computed above instead of parsing it twice.
      val tableDesc = new HTableDescriptor(tableName)
      tableDesc.addFamily(new HColumnDescriptor("log_format"))
      admin.createTable(tableDesc)
    } catch {
      // Fix: the original swallowed the exception and returned the table
      // name anyway, so the later bulk load would fail against a missing
      // table. Log and rethrow so the job aborts here; truly fatal errors
      // (OOM etc.) propagate untouched via NonFatal.
      case NonFatal(e) =>
        e.printStackTrace()
        throw e
    } finally {
      if (null != admin) {
        admin.close()
      }

      if (null != connection) {
        connection.close()
      }
    }

    table
  }


}
