package com.ipinyou.mprofile

import java.lang.String

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions
import org.apache.spark.streaming.Minutes
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream.toPairDStreamFunctions
import org.apache.spark.streaming.kafka.KafkaUtils
import org.slf4j.LoggerFactory
import org.slf4j.MarkerFactory

import com.ipinyou.hbase.model.MUser
import com.ipinyou.hbase.service.impl.HBaseConnectionServiceImpl

import kafka.serializer.StringDecoder
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.storage.StorageLevel

/**
 * Created by yuwenbing on 15/12/1.
 */

object KafkaOld {

  // Shared SparkContext; assigned lazily in main() (hence the var / null initial value).
  var sc: SparkContext = null

  // log
  val log = LoggerFactory.getLogger(this.getClass)
  // HBase access service, shared by the read/merge/write helpers below.
  // NOTE(review): name has a typo ("Serverce"); kept as-is for source compatibility.
  val hbaseServerce = new HBaseConnectionServiceImpl();
  // Number of hash buckets used by getPyPartiton to shard pyids into rowkey prefixes.
  val PARTITIONS = 1050

  /**
   * Loads an array of key/value pairs into a mutable map.
   *
   * Pairs are inserted in array order, so for a duplicate key the last
   * occurrence wins — exactly as the original element-by-element loop did.
   *
   * @param arr key/value pairs to load
   * @return a mutable HashMap containing every pair of `arr`
   */
  def array2map(arr: Array[(String, String)]) = {
    val themap = new scala.collection.mutable.HashMap[String, String]
    // Bulk insert replaces the hand-rolled per-element `+=` loop.
    themap ++= arr
    themap
  }
  /**
   * Expands a (feature, pyid) pair into the (value, pyid) records for every
   * value the model map associates with that feature.
   *
   * Multiple values are stored in `m` as one "____"-separated string. The
   * returned list always ends with the ("error", "error") sentinel, and an
   * unknown feature yields ("error", pyid) — callers filter "error" out.
   *
   * @param line (feature, pyid) pair from the stream
   * @param m    feature -> "____"-joined values lookup table
   * @return (value, pyid) pairs (most recently prepended first), sentinel last
   */
  def key2value(line: (String, String), m: scala.collection.mutable.HashMap[String, String]) = {
    val (feat, pyid) = line
    val value = m.getOrElse(feat, "error")
    // Sentinel the original seeded its accumulator with; preserved for callers.
    val sentinel = List(("error", "error"))

    if (value.contains("____")) {
      // Prepend each segment in turn — same order the original var-based loop
      // produced, but without mutation or the deprecated auto-tupled `.+:(a, b)`.
      value.split("____").foldLeft(sentinel)((acc, seg) => (seg, pyid) :: acc)
    } else {
      (value, pyid) :: sentinel
    }
  }

  /**
   * Merges streamed weights with weights already stored in HBase, one row at
   * a time: if HBase holds a row for `id`, emit (storedId, stored + streamed);
   * otherwise pass the streamed pair through unchanged.
   *
   * NOTE(review): issues one HBase get per element; getFromHbaseByBatch is the
   * batched variant intended for large partitions.
   *
   * @param iter (rowkey, weight) pairs from the stream
   * @return merged (rowkey, weight) pairs
   */
  def getFromHbase(iter: Iterator[(String, Float)]): Iterator[(String, Float)] = {
    val muser = new MUser()
    var reslist = List[(String, Float)]()

    while (iter.hasNext) {
      val (id, sweight) = iter.next()
      val rlist = hbaseServerce.get(muser, id)
      if (!rlist.isEmpty()) {
        val ruser = rlist.get(0)
        // Explicit tuple prepend instead of the deprecated auto-tupled `.::(a, b)`.
        reslist = (ruser.getId, ruser.getContent.toFloat + sweight) :: reslist
      } else {
        reslist = (id, sweight) :: reslist
      }
    }
    reslist.iterator
  }

  /**
   * Spark批量读Hbase
   */
  /**
   * Batched HBase read: drains the streamed (id, weight) pairs into a map,
   * fetches all matching rows with one multi-get, and adds each stored weight
   * onto the streamed weight for the same rowkey.
   *
   * @param iter (rowkey, weight) pairs from the stream; for a duplicate rowkey
   *             the latest weight wins before the merge
   * @return merged (rowkey, weight) pairs
   */
  def getFromHbaseByBatch(iter: Iterator[(String, Float)]): Iterator[(String, Float)] = {
    val muser = new MUser()
    val emMap = new scala.collection.mutable.HashMap[String, Float]

    // Collect the stream; last assignment wins per key, as in the original loop.
    iter.foreach { case (id, sweight) => emMap(id) = sweight }

    // Single batched get covering every distinct rowkey seen above.
    val rlist = hbaseServerce.get(muser, emMap.keySet.toArray: _*)

    var i = 0
    while (i < rlist.size()) {
      val ruser = rlist.get(i)
      val uid = ruser.getId
      // Fold the stored weight into the streamed one (0 if uid was not streamed).
      emMap(uid) = ruser.getContent.toFloat + emMap.getOrElse(uid, 0f)
      i += 1
    }
    emMap.toArray.iterator
  }

  /**
   * Extracts one sub-field from a \u0001-delimited log line.
   *
   * The line must have exactly 13 \u0001-separated segments; segment
   * `partitionIndex` is then split on \u0002 and element `fieldIndex` returned.
   *
   * @param line           raw log line
   * @param partitionIndex index of the \u0001 segment to inspect
   * @param fieldIndex     index of the \u0002 sub-field to return
   * @param fieldCounts    minimum number of sub-fields the segment must contain
   * @return the sub-field, or "error" if the line does not match the layout
   */
  def getField(line: String, partitionIndex: Int, fieldIndex: Int, fieldCounts: Int): String = {

    var res: String = "error"
    val segs = line.split("\u0001", -1)
    // 13 is the fixed column count of this log format.
    if (segs.length == 13 && partitionIndex >= 0 && partitionIndex < segs.length) {
      // field
      val partitionContent = segs(partitionIndex)
      val arr = partitionContent.split("\u0002", -1)
      // The original only compared against fieldCounts, so fieldIndex (and
      // partitionIndex above) could still be out of bounds and throw; guard both.
      if (arr.length >= fieldCounts && fieldIndex >= 0 && fieldIndex < arr.length) {
        res = arr(fieldIndex)
      }
    }
    res
  }

  /**
   * Writes each (rowkey, weight) pair to HBase one insert at a time.
   * A single MUser instance is reused and mutated for every row.
   *
   * @param iter (rowkey, weight) pairs to persist
   */
  def save2Hbase(iter: Iterator[(String, Float)]) {
    val muser = new MUser()
    iter.foreach { case (rowkey, weight) =>
      muser.setId(rowkey)
      muser.setContent(weight.toString())
      hbaseServerce.insert(muser)
    }
  }

  /**
   * Buffers every (rowkey, weight) pair as a fresh MUser and writes them all
   * to HBase in a single batched insert.
   *
   * @param iter (rowkey, weight) pairs to persist
   */
  def save2HbaseByBatch(iter: Iterator[(String, Float)]) {
    val musers = new ArrayBuffer[MUser]()
    for ((rowkey, weight) <- iter) {
      val muser = new MUser()
      muser.setId(rowkey)
      muser.setContent(weight.toString)
      musers += muser
    }
    hbaseServerce.insert(musers: _*)
  }

  /**
   * Maps a pyid to its zero-padded 4-digit partition label ("0000".."1049").
   *
   * @param pyid user id to hash
   * @return abs(pyid.hashCode % PARTITIONS), formatted with "%04d"
   */
  def getPyPartiton(pyid: String): String = {
    val py_hash = pyid.hashCode()
    // Take the modulus BEFORE abs: Math.abs(Int.MinValue) is still negative,
    // so the original abs-then-mod order could emit a negative partition label
    // for a pyid hashing to Int.MinValue. For every other hash the result is
    // identical, so existing partition assignments are preserved.
    val py_partition = Math.abs(py_hash % PARTITIONS)
    val partition = "%04d".format(py_partition)
    partition
  }

  /**
   * Entry point. Builds a Spark Streaming pipeline that consumes the "adv"
   * and "cvt" Kafka topics, extracts (appid, pyid) pairs from non-"General"
   * devices, and prints a sample batch. The scoring and HBase merge stages
   * are currently commented out.
   *
   * args: empty -> local[2] developer run with hard-coded file paths and a
   * 5-second batch interval; otherwise args(0) = model file path,
   * args(1) = basefeat2extentfeat file path, args(2) = batch interval in
   * minutes.
   */
  def main(args: Array[String]): Unit = {

    // local field

    // parameters
    var model_file = ""
    var basefeat2extentfeat_file = ""
    var intervel = 0
    val config = new SparkConf().setAppName("mobile.scoring")
    // spark  streaming  context
    var ssc: StreamingContext = null

    if (args.length == 0) {
      // Developer defaults: local master, hard-coded paths, 5s micro-batches.
      model_file = "file:///Users/miaoyujia/git/mobile-userprofile-src/mprofile/file/base"
      basefeat2extentfeat_file = "file:///Users/miaoyujia/git/mobile-userprofile-src/mprofile/file/basefeat2extentfeat"
      config.setMaster("local[2]")
      if (sc == null) sc = new SparkContext(config)
      ssc = new StreamingContext(sc, Seconds(5))
    } else {
      model_file = args(0)
      basefeat2extentfeat_file = args(1)
      intervel = args(2).toInt
      if (sc == null) sc = new SparkContext(config)
      ssc = new StreamingContext(sc, Minutes(intervel))
    }
    log.info(MarkerFactory.getMarker("YuJia"), "Scoring, {}", Array("model_file", model_file, "basefeat2extentfeat_file", basefeat2extentfeat_file))

    // model_RDD basefeat2extentfeat_RDD — both collected into driver-side maps.
    val model_map = getBaseFeature(sc, model_file)
    val basefeat2extentfeat_map = getBaseExFeature(sc, basefeat2extentfeat_file)

    val cousume_group = "mobileprofile_t1"

    // TODO product rules -> visitors, conversions
    // NOTE(review): broker list is hard-coded; consider moving it to configuration.
    val old_brokers = "192.168.145.201:9092," +
      "192.168.145.202:9092," +
      "192.168.145.203:9092," +
      "192.168.145.204:9092," +
      "192.168.145.205:9092," +
      "192.168.145.206:9092," +
      "192.168.145.207:9092," +
      "192.168.145.208:9092," +
      "192.168.145.209:9092," +
      "192.168.145.210:9092"
    val old_kafkaParams = Map[String, String]("metadata.broker.list" -> old_brokers, "group.id" -> cousume_group)
    val old_topics = Set("adv", "cvt")
    // Direct (receiver-less) Kafka stream; each record is a (key, messageLine) pair.
    val old_msgDStreamRdd = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, old_kafkaParams, old_topics)

    // visitor / conversion-product features
    // Field indices below (2, 15, 41, 54, 60, 66) encode the tab-separated log
    // layout — presumably action type, pyid, device type, product ids and app
    // id; TODO confirm against the log schema.
    val old_msg_rdd = old_msgDStreamRdd.flatMap { x =>
      val line = x._2
      val arr = line.split("\t", -1)
      // NOTE(review): arr(2) is read BEFORE the length guard below — a line
      // with fewer than 3 tab-separated fields would throw here; confirm the
      // upstream format guarantees at least 3 fields.
      val ActionType = arr(2)
      if (arr.length > 66) {
        val devicetype = arr(41)
        val pyid = arr(15)

        var appid = arr(60)
        // productid is computed but currently unused (see commented branch below).
        var productid = ""
        if (ActionType == "Conversion") {
          productid = arr(66)
        } else {
          productid = arr(54)
        }
        if (devicetype != "General") {
          //          if (!pyid.isEmpty() && !productid.isEmpty()) List(("productid:" + productid, pyid)) else List()
          if (!pyid.isEmpty() && !appid.isEmpty()) List(("appid:" + appid, pyid)) else List()

        } else {
          List()
        }
      } else {
        List()
      }
    }

        old_msg_rdd.print(10)

    // scoring (disabled): expand features via model_map, split "label__weight",
    // key by "partition|label|pyid" and sum weights per key.
//    val action_score = {
//      old_msg_rdd.flatMap(line => key2value(line, model_map)).
//        filter(!_._1.contains("error")).map { line =>
//          val pyid = line._2
//          val label = line._1.split("__")(0)
//          val weight = line._1.split("__")(1)
//
//          // hash & modulo
//          val py_partition = getPyPartiton(pyid)
//          val pyid_label_score = (List(py_partition, label, pyid).mkString("|"), weight.toFloat)
//          pyid_label_score
//        }.reduceByKey(extracted(_, _), 50)
//    }
//
//    action_score.print(10)
    //
    //    // reduceByKeyLocally
    //    log.info("action_score ----------------------------------------------------- end")
    //
    //    // read existing weights from HBase, merge, and write back to HBase
    //    action_score.mapPartitions(getFromHbaseByBatch).foreachRDD(rdd => rdd.foreachPartition(save2HbaseByBatch))

    ssc.start()
    ssc.awaitTermination()
  }

  /** Adds two weights; used as the reduce function for score aggregation. */
  def extracted(x: Float, y: Float): Float = x + y

  /**
   * Loads the scoring model from a text file into a driver-side lookup map.
   *
   * Each line is "label<TAB>feature<TAB>weight"; the map value for a feature
   * is the "____"-joined list of its "label__weight" strings.
   *
   * @param sc         Spark context used to read the file
   * @param model_file path of the model file
   * @return mutable map: feature -> "label__weight[____label__weight...]"
   */
  def getBaseFeature(sc: org.apache.spark.SparkContext, model_file: String) = {

    // model_RDD basefeat2extentfeat_RDD
    val model = sc.textFile(model_file)
    log.info("model count : " + model.count())

    val model_RDD = model.map { line =>
      line.trim().split("\t", -1) match {
        case Array(label, feature, weight) => (feature, List(label, weight).mkString("__"))
        case _                             => ("error", "error")
      }
      // Exact sentinel match: the old `!_._1.contains("error")` filter also
      // dropped legitimate features whose name merely contained "error".
    }.filter(t => t != ("error", "error"))

    model_RDD.take(10).foreach(println)
    log.info("model_RDD -------------------------- end")
    val model_group = model_RDD.groupByKey().map(line => (line._1, line._2.mkString("____"))).collect()
    val model_map = array2map(model_group)
    model_map
  }

  /**
   * Loads the base-feature -> extended-feature mapping from a text file into
   * a driver-side lookup map.
   *
   * Each line is "basefeat<TAB>extentfeat"; the map value for a base feature
   * is the "____"-joined list of its extended features.
   *
   * @param sc                       Spark context used to read the file
   * @param basefeat2extentfeat_file path of the mapping file
   * @return mutable map: basefeat -> "extentfeat[____extentfeat...]"
   */
  def getBaseExFeature(sc: SparkContext, basefeat2extentfeat_file: String) = {
    val basefeat2extentfeat = sc.textFile(basefeat2extentfeat_file)
    log.info("basefeat2extentfeat count : " + basefeat2extentfeat.count())
    val basefeat2extentfeat_RDD = basefeat2extentfeat.map { line =>
      line.trim().split("\t", -1) match {
        case Array(basefeat, extentfeat) => (basefeat, extentfeat)
        case _                           => ("error", "error")
      }
      // Exact sentinel match: the old `!_._1.contains("error")` filter also
      // dropped legitimate base features whose name merely contained "error".
    }.filter(t => t != ("error", "error"))

    basefeat2extentfeat_RDD.take(10).foreach(println)
    log.info("basefeat2extentfeat_RDD -------------------------- end")
    val basefeat2extentfeat_group = basefeat2extentfeat_RDD.groupByKey().map(line => (line._1, line._2.mkString("____"))).collect
    val basefeat2extentfeat_map = array2map(basefeat2extentfeat_group)
    basefeat2extentfeat_map
  }
}
