package com.ipinyou.offscore

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.slf4j.LoggerFactory

import com.ipinyou.hbase.model.MUser
import com.ipinyou.hbase.service.impl.HBaseConnectionServiceImpl

object hf2hb {

  // Run mode: "0" = test, "1" = production (overridable via args(0)).
  // NOTE(review): flag is set from args but never read in this file — confirm
  // whether downstream code inspects hf2hb.flag before removing it.
  var flag = "0"
  // Input path of the "pyid \t label \t score" TSV (overridable via args(1)).
  var path = "file:///Users/miaoyujia/git/mobile-userprofile-src/offline_score/file/py_cate_score"

  // HBase connection service shared by the read and write paths.
  // NOTE(review): original spelling ("Serverce") kept — it is part of this
  // object's public surface.
  val hbaseServerce = new HBaseConnectionServiceImpl();

  // log
  val log = LoggerFactory.getLogger(this.getClass)

  /**
   * Maps a pyid to a 4-digit, zero-padded partition bucket in [0, 1050).
   *
   * Computes the modulo BEFORE taking the absolute value: the previous
   * `Math.abs(hashCode) % 1050` stays negative when `hashCode == Int.MinValue`
   * (because `Math.abs(Int.MinValue) == Int.MinValue`), which produced a
   * malformed key such as "-548". `abs(h % 1050)` yields the identical bucket
   * for every other hash value, so existing rowkeys are unaffected.
   */
  def getPyPartiton(pyid: String): String = {
    val py_partition = Math.abs(pyid.hashCode() % 1050)
    "%04d".format(py_partition)
  }

  /**
   * Batch-reads the currently stored scores for one partition's rowkeys from
   * HBase and merges them with the incoming scores (stored + incoming per key).
   *
   * @param iter (rowkey, score) pairs for one RDD partition
   * @return (rowkey, merged score) pairs
   */
  def getFromHbaseByBatch(iter: Iterator[(String, Float)]): Iterator[(String, Float)] = {
    val muser = new MUser()
    val emMap = new scala.collection.mutable.HashMap[String, Float]

    while (iter.hasNext) {
      val (id, sweight) = iter.next()
      emMap(id) = sweight
    }

    if (emMap.isEmpty) {
      // Empty partition: avoid issuing a zero-key get against HBase.
      Iterator.empty
    } else {
      val idlist = emMap.keySet.toArray
      val rlist = hbaseServerce.get(muser, idlist: _*)

      for (i <- 0 until rlist.size()) {
        val ruser = rlist.get(i)
        val uid = ruser.getId
        // NOTE(review): assumes getContent is always a parseable float — a
        // malformed cell aborts the task with NumberFormatException.
        val weight = ruser.getContent.toFloat
        emMap(uid) = weight + emMap.getOrElse(uid, 0f)
      }
      // Materialize before returning so the iterator does not depend on the
      // mutable map after this call.
      emMap.toArray.iterator
    }
  }

  /**
   * Batch-writes one partition's merged (rowkey, score) pairs back to HBase.
   * (Was procedure syntax `def f(...) { }`, deprecated — now explicit `: Unit =`.)
   */
  def save2HbaseByBatch(iter: Iterator[(String, Float)]): Unit = {
    val musers = new ArrayBuffer[MUser]()
    while (iter.hasNext) {
      val (rowkey, weight) = iter.next()
      val muser = new MUser()
      muser.setId(rowkey)
      muser.setContent(weight.toString)
      musers.append(muser)
    }
    // Skip the RPC entirely for empty partitions.
    if (musers.nonEmpty) {
      hbaseServerce.insert(musers: _*)
    }
  }

  /**
   * Entry point: reads "pyid \t label \t score" lines from `path`, builds the
   * rowkey "partition|label|pyid", adds each score to the value already stored
   * in HBase, and writes the merged result back.
   *
   * args(0): run flag ("0" test / "1" production); args(1): input path.
   */
  def main(args: Array[String]): Unit = {

    // Fix: the original read args(1) whenever args.length > 0, throwing
    // ArrayIndexOutOfBoundsException when exactly one argument was supplied.
    if (args.length > 0) flag = args(0)
    if (args.length > 1) path = args(1)

    val config = new SparkConf().setAppName("mobile.off.hf2hb")
    val sc = new SparkContext(config)

    val srcRdd = sc.textFile(path)
    val resRdd = srcRdd.map { x =>
      val arr = x.split("\t", -1)
      val pyid = arr(0)
      val label = arr(1)
      val weight = arr(2)

      // hash & modulo -> 4-digit partition prefix for the rowkey
      val py_partition = getPyPartiton(pyid)
      (List(py_partition, label, pyid).mkString("|"), weight.toFloat)
    }.repartition(30)

    // Merge with existing HBase scores, then persist.
    resRdd.mapPartitions(getFromHbaseByBatch).foreachPartition(save2HbaseByBatch)
  }
}