package cn.tedu.dao

import java.util

import org.apache.spark.SparkContext
import cn.tedu.pojo.LogBean
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.fs.shell.find.Result
import org.apache.hadoop.hbase.client.Put

import scala.util.Random
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.filter.RowFilter
import org.apache.hadoop.hbase.filter.RegexStringComparator
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
import org.apache.hadoop.hbase.Cell

object HBaseUtil {
  // Injected by the driver before either method is called; both methods
  // NPE if this is still null. NOTE(review): consider passing the
  // SparkContext as a parameter instead of mutable shared state.
  var sparkContext: SparkContext = null

  /**
   * Persists one [[LogBean]] into the HBase table "weblog" (column family
   * "cf1") through Spark's new-API Hadoop output path.
   *
   * Row-key design: `sstime_uvid_ssid_cip_<random 0-99>`
   *  - leading timestamp keeps rows time-ordered, enabling range scans;
   *  - uvid / ssid / cip in the key support filter-based lookups;
   *  - trailing random digits spread writes to avoid region hotspotting.
   *
   * @param logBean the parsed web-log record to write
   */
  def saveToHBase(logBean: LogBean) = {
    val config = HBaseConfiguration.create()
    config.set("hbase.zookeeper.quorum", "mycentos8")
    config.set("hbase.zookeeper.property.clientPort", "2181")
    config.set("hbase.defaults.for.version.skip", "true")
    config.set(TableOutputFormat.OUTPUT_TABLE, "weblog")

    // Wrap the single bean in an RDD so the TableOutputFormat path can be used.
    val beanRDD = sparkContext.makeRDD(List(logBean))

    // RDD[LogBean] -> RDD[(output key, output value)].
    // Use the mapped element `bean`, not the captured outer `logBean`,
    // so the transformation stays correct for multi-element RDDs.
    val hbaseRDD = beanRDD.map { bean =>
      val rowKey = bean.sstime + "_" + bean.uvid + "_" + bean.ssid + "_" +
        bean.cip + "_" + Random.nextInt(100)

      val row = new Put(rowKey.getBytes)
      row.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("url"), Bytes.toBytes(bean.url))
      row.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("urlname"), Bytes.toBytes(bean.urlname))
      row.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("uvid"), Bytes.toBytes(bean.uvid))
      row.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("ssid"), Bytes.toBytes(bean.ssid))
      row.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("sscount"), Bytes.toBytes(bean.sscount))
      row.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("sstime"), Bytes.toBytes(bean.sstime))
      row.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("cip"), Bytes.toBytes(bean.cip))

      // TableOutputFormat ignores the key; an empty writable is sufficient.
      (new ImmutableBytesWritable, row)
    }

    val job = Job.getInstance(config)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // FIX: the emitted value is a Put (mutation), not a Result — and certainly
    // not org.apache.hadoop.fs.shell.find.Result, which the old code used.
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])

    hbaseRDD.saveAsNewAPIHadoopDataset(job.getConfiguration)
  }

  /**
   * Scans the "weblog" table for rows whose key falls in
   * `[startTime, endTime)` (keys start with the timestamp, so a plain
   * start/stop row works) and additionally matches `regex` via a RowFilter.
   *
   * Returns the latest "cf1:url" cell of each matching row. Entries may be
   * null when a matching row lacks that column.
   *
   * @param startTime inclusive lower bound of the row-key timestamp prefix
   * @param endTime   exclusive upper bound of the row-key timestamp prefix
   * @param regex     row-key regular expression applied server-side
   */
  def queryByRangeAndRegex(startTime: Long, endTime: Long, regex: String) = {
    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.set("hbase.zookeeper.quorum", "mycentos8")
    hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")

    val scan = new Scan()
    // Batch rows per RPC; cheap because only one cell per row is retained.
    scan.setCaching(5000)
    scan.setStartRow(startTime.toString.getBytes)
    scan.setStopRow(endTime.toString.getBytes)
    // Server-side row-key regex filter.
    scan.setFilter(new RowFilter(CompareOp.EQUAL, new RegexStringComparator(regex)))

    val resultList = new util.ArrayList[Cell]()
    // Close scanner, table, and connection in reverse order of acquisition
    // even when the scan throws (the old code leaked all three).
    val conn = ConnectionFactory.createConnection(hbaseConf)
    try {
      val table = conn.getTable(TableName.valueOf("weblog"))
      try {
        val scanner = table.getScanner(scan)
        try {
          val it = scanner.iterator()
          while (it.hasNext()) {
            val r = it.next()
            resultList.add(r.getColumnLatestCell(Bytes.toBytes("cf1"), Bytes.toBytes("url")))
          }
        } finally {
          scanner.close()
        }
      } finally {
        table.close()
      }
    } finally {
      conn.close()
    }
    resultList
  }
}
