package com.ipinyou.hb2hf

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import com.ipinyou.hbase.model.MUser
import com.ipinyou.hbase.service.impl.HBaseConnectionServiceImpl
import org.slf4j.LoggerFactory
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.SparkFiles

/**
 * Spark batch job: scans the HBase table "mobileprofile", counts rowkeys per
 * tag (second segment of the '|'-delimited rowkey), joins the counts against a
 * tab-separated rule file mapping tag id -> display name, and writes
 * "id \t name \t count" lines to the output path.
 *
 * Expected args: args(0) = flag ("0" test, "1" production),
 *                args(1) = output path,
 *                args(2) = rule file path.
 */
object CountUserByTag {

  val a = 1.0
  val b = 2.0

  // 0 = test, 1 = production
  var flag = "0"

  var output = "file:///Users/miaoyujia/tmp/hb2hdfs"
  var all_rule = ""
  // log
  val log = LoggerFactory.getLogger(this.getClass)

  def main(args: Array[String]): Unit = {

    // BUG FIX: the original guard was `args.length > 0` but read args(1) and
    // args(2), crashing with ArrayIndexOutOfBoundsException when only one or
    // two args were supplied. Require all three; otherwise keep the defaults.
    if (args.length >= 3) {
      flag = args(0)
      output = args(1)
      all_rule = args(2)
    } else if (args.nonEmpty) {
      log.warn("Expected 3 args (flag, output, rule path) but got {}; using defaults", args.length)
    }

    val config = new SparkConf().setAppName("mobile.catecount")
    val sc = new SparkContext(config)

    try {
      val conf = HBaseConfiguration.create()
      conf.set(TableInputFormat.INPUT_TABLE, "mobileprofile")
      conf.set(TableInputFormat.SCAN_MAXVERSIONS, "30")

      val hBaseRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
        classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
        classOf[org.apache.hadoop.hbase.client.Result])

      println("=====================================================")

      // Extract (tag, subTag) from each rowkey of the form "<prefix>|<tag>|<subTag>...".
      // Malformed rowkeys (fewer than 3 segments) are skipped instead of
      // crashing the job with ArrayIndexOutOfBoundsException.
      val resRdd = hBaseRDD.flatMap {
        case (_, result) =>
          val rowkey = new String(result.getRow)
          val karr = rowkey.split("""\|""", -1)
          if (karr.length >= 3) Some((karr(1), karr(2))) else None
      }

      // Count rows per tag id; collected to the driver as a Map and captured
      // by the closure below (small enough to broadcast implicitly).
      val zres = resRdd.countByKey()

      // Join counts with the human-readable (Chinese) tag names from the rule file.
      // Rule file format: tab-separated, column 0 = tag id, column 1 = name.
      val rule_rdd = sc.textFile(all_rule)
      rule_rdd.map { line =>
        val arr = line.split("\t", -1)
        (arr(0), arr(1))
      }.distinct().flatMap {
        case (k, v) =>
          // Option-based lookup replaces the original `contains` check followed
          // by the partial `zres.get(k).get` (double lookup + unsafe .get).
          zres.get(k).map(count => Seq(k, v, count).mkString("\t"))
      }.saveAsTextFile(output)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}