package liuzhou.spark.dao

import liuzhou.spark.beans.CategraySearchClickCount
import liuzhou.spark.common.HbaseUtils
import org.apache.hadoop.hbase.client.Get
import org.apache.hadoop.hbase.util.Bytes

import scala.collection.mutable.ListBuffer

object CategraySearchClickCountDAO {
  // NOTE: the HBase table must be created in advance, otherwise access fails with:
  //   Table 'aqy_2' was not found, got: aqy_1.

  val tableName = "aqy_2"
  val cf = "info"
  val col = "search_click_count"

  /**
    * Persist per-category search click counts by atomically incrementing the
    * stored counter for each record's row key on the HBase server side.
    *
    * @param list click-count records; `daySearchCategary` is used as the row key
    *             and `clickCount` as the increment amount
    */
  def save(list: ListBuffer[CategraySearchClickCount]): Unit = {
    val table = HbaseUtils.getInstance().getHTable(tableName)
    // NOTE(review): the HTable is never closed here — presumably HbaseUtils
    // owns/caches the table lifecycle; confirm, otherwise close it in a finally.
    list.foreach { e =>
      // incrementColumnValue adds clickCount to the existing cell value
      // atomically on the server (the stored value is a binary-encoded Long).
      table.incrementColumnValue(
        Bytes.toBytes(e.daySearchCategary),
        Bytes.toBytes(cf),
        Bytes.toBytes(col),
        e.clickCount)
    }
  }

  /**
    * Look up the accumulated search click count for a single row key.
    *
    * @param dayClass row key (a "day_category"-style identifier)
    * @return the stored counter, or 0 when the row or cell does not exist
    */
  def count(dayClass: String): Long = {
    val table = HbaseUtils.getInstance().getHTable(tableName)
    val get = new Get(Bytes.toBytes(dayClass))
    val value = table.get(get).getValue(Bytes.toBytes(cf), Bytes.toBytes(col))
    // A missing row/cell yields null; use the uppercase 0L literal
    // (lowercase `0l` is deprecated in Scala and easily misread as `01`).
    if (value == null) 0L else Bytes.toLong(value)
  }

  // Manual smoke test: increments three counters, then reads one back.
  def main(args: Array[String]): Unit = {
    val list = new ListBuffer[CategraySearchClickCount]
    list.append(CategraySearchClickCount("20171122_1_1", 300))
    list.append(CategraySearchClickCount("20171122_5_1", 700))
    list.append(CategraySearchClickCount("20171122_1_2", 600))
    save(list)
    println(count("20171122_1_2"))
  }

}
