package com.ipinyou.mprofile

import java.lang.String

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions
import org.apache.spark.streaming.Minutes
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream.toPairDStreamFunctions
import org.apache.spark.streaming.kafka.KafkaUtils
import org.slf4j.LoggerFactory
import org.slf4j.MarkerFactory

import com.ipinyou.hbase.model.MUser
import com.ipinyou.hbase.service.impl.HBaseConnectionServiceImpl

import kafka.serializer.StringDecoder
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.storage.StorageLevel

/**
 * Created by miaoyujia on 15/12/1.
 */

object MbScoring {

  // Shared SparkContext; null until main() creates it.
  var sc: SparkContext = null

  // Logger for this object.
  val log = LoggerFactory.getLogger(this.getClass)
  // HBase connection service used for batched reads/writes of user scores.
  // NOTE(review): name is misspelled ("Serverce"); kept as-is since other
  // methods in this object reference it.
  val hbaseServerce = new HBaseConnectionServiceImpl();
  // Number of hash buckets used as a row-key prefix (see getPyPartiton).
  val PARTITIONS = 1050

  /**
   * Spark批量读Hbase
   */
  /**
   * Batched read-merge against HBase for one partition.
   *
   * Drains the partition's (id, weight) pairs into a map (a later entry wins
   * on a duplicate id), fetches the stored MUser rows for all ids with one
   * batched HBase call, and adds each persisted weight onto the incoming
   * one. Returns the merged (id, weight) pairs.
   */
  def getFromHbaseByBatch(iter: Iterator[(String, Float)]): Iterator[(String, Float)] = {
    val template = new MUser()
    val merged = new scala.collection.mutable.HashMap[String, Float]

    // Collect the incoming pairs from this partition.
    for ((id, weight) <- iter) {
      merged(id) = weight
    }

    // One batched HBase lookup for every id seen in this partition.
    val ids = merged.keySet.toArray
    val stored = hbaseServerce.get(template, ids: _*)

    // Fold the persisted weights into the fresh ones.
    var i = 0
    while (i < stored.size()) {
      val user = stored.get(i)
      val id = user.getId
      val persisted = user.getContent.toFloat
      merged(id) = persisted + merged.getOrElse(id, 0f)
      i += 1
    }
    merged.toArray.iterator
  }

  /**
   * Extracts a sub-field from a '\u0001'-delimited log line.
   *
   * The line is split on '\u0001' into top-level segments; segment
   * `partitionIndex` is then split on '\u0002' and its element at
   * `fieldIndex` is returned. `fieldCounts` is the minimum number of
   * sub-fields the segment must contain to be considered well-formed.
   *
   * Fix: bounds were previously checked only indirectly (hard-coded segment
   * count and `arr.length >= fieldCounts`), so an out-of-range
   * `partitionIndex` or a `fieldIndex >= arr.length` threw
   * ArrayIndexOutOfBoundsException. Both indices are now guarded explicitly
   * and malformed input uniformly yields "error".
   *
   * @param line           raw log line
   * @param partitionIndex index of the top-level segment to inspect
   * @param fieldIndex     index of the sub-field inside that segment
   * @param fieldCounts    required minimum sub-field count of the segment
   * @return the sub-field, or the literal "error" if the line is malformed
   */
  def getField(line: String, partitionIndex: Int, fieldIndex: Int, fieldCounts: Int): String = {
    val segs = line.split("\u0001", -1)
    // A valid log line carries at least 13 top-level segments.
    if (segs.length >= 13 && partitionIndex >= 0 && partitionIndex < segs.length) {
      val arr = segs(partitionIndex).split("\u0002", -1)
      // Guard the real index as well as the declared field count.
      if (arr.length >= fieldCounts && fieldIndex >= 0 && fieldIndex < arr.length) {
        arr(fieldIndex)
      } else {
        "error"
      }
    } else {
      "error"
    }
  }

  /**
   * Batched write of one partition's (rowkey, weight) pairs into HBase.
   *
   * Materialises each pair as an MUser (id = rowkey, content = weight as
   * text) and persists the whole partition with a single batched insert.
   */
  def save2HbaseByBatch(iter: Iterator[(String, Float)]) {
    val users = new ArrayBuffer[MUser]()
    for ((rowkey, weight) <- iter) {
      val user = new MUser()
      user.setId(rowkey)
      user.setContent(weight.toString)
      users.append(user)
    }
    hbaseServerce.insert(users: _*)
  }

  /**
   * Computes the zero-padded hash-bucket prefix for a pyid.
   *
   * The pyid's hashCode is reduced modulo PARTITIONS and rendered as a
   * 4-digit string ("0000".."1049") used as a row-key prefix.
   *
   * Fix: Math.abs(Int.MinValue) is still Int.MinValue (negative), which
   * previously produced a negative bucket and a 5-char "-0xxx" prefix for
   * any pyid whose hashCode is Int.MinValue. Taking the absolute value in
   * Long space makes that edge case positive while leaving every other
   * pyid's bucket unchanged.
   */
  def getPyPartiton(pyid: String): String = {
    val hash = pyid.hashCode().toLong
    val bucket = (Math.abs(hash) % PARTITIONS).toInt
    "%04d".format(bucket)
  }

  /**
   * Entry point: a Spark Streaming job that scores mobile users.
   *
   * Reads bid/unbid/click events from the "new" Kafka cluster and adv/cvt
   * events from the "old" cluster via the direct (receiver-less) Kafka API,
   * extracts (feature, pyid) pairs, expands them against a broadcast
   * feature->"label__weight" model, sums weights per
   * "partition|label|pyid" key, merges with the weights already stored in
   * HBase and writes the result back.
   *
   * Args (when non-empty): args(0) = model file path, args(1) = base-to-extent
   * feature mapping file path, args(2) = batch interval in minutes. With no
   * args the job runs locally with hard-coded file paths and a 2s interval.
   */
  def main(args: Array[String]): Unit = {

    // local field

    // parameters
    var model_file = ""
    var basefeat2extentfeat_file = ""
    var intervel = 0
    val config = new SparkConf().setAppName("mobile.scoring")
    // Spark Streaming context
    var ssc: StreamingContext = null

    if (args.length == 0) {
      // No CLI arguments: local development mode (hard-coded inputs, 2s batches).
      model_file = "file:///Users/miaoyujia/git/mobile-userprofile-src/mprofile/file/base"
      basefeat2extentfeat_file = "file:///Users/miaoyujia/git/mobile-userprofile-src/mprofile/file/basefeat2extentfeat"
      config.setMaster("local[2]")
      if (sc == null) sc = new SparkContext(config)
      ssc = new StreamingContext(sc, Seconds(2))
    } else {
      model_file = args(0)
      basefeat2extentfeat_file = args(1)
      intervel = args(2).toInt
      if (sc == null) sc = new SparkContext(config)
      ssc = new StreamingContext(sc, Minutes(intervel))
    }
    // NOTE(review): slf4j substitutes only the single {} placeholder here, so
    // only the first array element appears in the rendered message — confirm
    // this is intended.
    log.info(MarkerFactory.getMarker("YuJia"), "Scoring, {}", Array("model_file", model_file, "basefeat2extentfeat_file", basefeat2extentfeat_file))

    // model_RDD basefeat2extentfeat_RDD
    // Optimization: ship the model map to executors once as a broadcast variable.
    val br_model_map = sc.broadcast(getBaseFeature(sc, model_file))

    val cousume_group = "mobileprofile_t1"
    // Read data from Kafka using the direct (receiver-less) API.
    val brokers = "192.168.145.216:9092,192.168.145.217:9092,192.168.145.218:9092,192.168.145.221:9092,192.168.145.222:9092,192.168.145.223:9092";
    // auto.offset.reset controls the starting offset: largest or smallest,
    // e.g. "auto.offset.reset" -> "largest".
    val kafkaParams = Map[String, String]("metadata.broker.list" -> brokers, "group.id" -> cousume_group)
    // Original note: "temporarily drop unbid" — NOTE(review): the unbid-deal /
    // unbid-nodeal topics are in fact still subscribed below.
    val topics = Set("bid-deal", "bid-nodeal","unbid-deal", "unbid-nodeal","click" )
    val new_msgDStreamRdd = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)

    // Optimization (disabled): raise receive parallelism with several streams.
    //    val streams = (1 to 4).map { _ => KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics) }
    //    val new_msgDStreamRdd = ssc.union(streams)

    //    val zkQuorum = "192.168.146.70:2181,192.168.146.91:2181,192.168.146.66:2181,192.168.146.56:2181,192.168.146.64:2181,192.168.146.65:2181,192.168.146.67:2181,192.168.146.68:2181,192.168.146.69:2181/kafka_new"
    //    val groupId = "mprofile_v6"
    //    val topics = Map("unbid" -> 1)
    //    // parallelism optimization
    //    val msgDStreamRdds = (1 to 40).map { _ => KafkaUtils.createStream(ssc, zkQuorum, groupId, topics, StorageLevel.MEMORY_AND_DISK_SER_2) }

    // repartition removed: block size (and therefore task count) is controlled
    // via spark.streaming.interval instead.

    //    val msgDStreamRdd = ssc.union(msgDStreamRdds).repartition(40)
    //   val msgDStreamRdd = ssc.union(msgDStreamRdds)

    // TODO product rules -> visitors, conversions
    val old_brokers = "192.168.145.201:9092," +
      "192.168.145.202:9092," +
      "192.168.145.203:9092," +
      "192.168.145.204:9092," +
      "192.168.145.205:9092," +
      "192.168.145.206:9092," +
      "192.168.145.207:9092," +
      "192.168.145.208:9092," +
      "192.168.145.209:9092," +
      "192.168.145.210:9092"
    val old_kafkaParams = Map[String, String]("metadata.broker.list" -> old_brokers, "group.id" -> cousume_group)
    val old_topics = Set("adv", "cvt")
    val old_msgDStreamRdd = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, old_kafkaParams, old_topics)

    // Extract (feature, pyid) pairs from the new-cluster messages.
    val new_msg_rdd = new_msgDStreamRdd.flatMap { x =>
      val line = x._2
      // bid & unbid logs
      // appflag: 0 = PC, 1 = mobile
      val appflag = getField(line, 5, 0, 10)
      // log/action type
      val ActionType = getField(line, 1, 4, 10)
      if (appflag == "1") {
        val pyid = getField(line, 2, 0, 10)

        // Optimization: drop records whose pyid is empty or too short
        // (length <= 15) without further parsing.
        if (!pyid.isEmpty() && pyid.length() > 15) {
          if (ActionType == "click") { // topic click
            val creativeId = getField(line, 9, 7, 10)
            if (creativeId.isEmpty()) List() else List(("creativeId:" + creativeId, pyid))
          } else { // topic bid/unbid
            val appid = getField(line, 5, 1, 10)
            if (appid.isEmpty()) List() else List(("appid:" + appid, pyid))
          }
        } else {
          List()
        }
      } else {
        List()
      }
    }

    // Visitor / conversion-product features from the legacy cluster
    // (tab-separated adv/cvt logs).
    val old_msg_rdd = old_msgDStreamRdd.flatMap { x =>
      val line = x._2
      val arr = line.split("\t", -1)
      if (arr.length > 66) {
        val ActionType = arr(2)
        val devicetype = arr(41)
        val pyid = arr(15)
        var productid = ""
        // Conversions carry the product id in a different column.
        if (ActionType == "Conversion") {
          productid = arr(66)
        } else {
          productid = arr(54)
        }
        if (devicetype != "General") {
          // Optimization: drop records whose pyid is empty or too short
          // (length <= 15), or with no product id.
          if (!pyid.isEmpty() && !productid.isEmpty() && pyid.length() > 15) List(("productid:" + productid, pyid)) else List()
        } else {
          List()
        }
      } else {
        List()
      }
    }

    val msg_rdd = new_msg_rdd.union(old_msg_rdd)

    // Scoring: expand every feature into its model "label__weight" entries,
    // re-key as "partition|label|pyid" and sum weights per key.
    val action_score = msg_rdd
      .flatMap { line =>
        val feat = line._1
        val pyid = line._2
        val m = br_model_map.value
        if (m.contains(feat)) {
          // Model value is "label1__weight1____label2__weight2____...".
          val value = m.get(feat).get
          val segs = value.split("____", -1)
          for (ele <- segs) yield (ele, pyid)
        } else {
          List()
        }
      }.map { line =>
        val pyid = line._2
        val label = line._1.split("__")(0)
        val weight = line._1.split("__")(1)
        // hash & modulo: bucket prefix for the HBase row key
        val py_partition = getPyPartiton(pyid)
        (List(py_partition, label, pyid).mkString("|"), weight.toFloat)
      }.reduceByKey(extracted(_, _), 50)

    // reduceByKeyLocally
    log.info("action_score ----------------------------------------------------- end")

    //    action_score.print(10)
    // Merge with the weights already stored in HBase, then write back.
    action_score.mapPartitions(getFromHbaseByBatch).foreachRDD(rdd => rdd.foreachPartition(save2HbaseByBatch))

    ssc.start()
    ssc.awaitTermination()
  }

  /** Commutative reduce function: the sum of two partial weights. */
  def extracted(x: Float, y: Float): Float = x + y

  /**
   * Loads the scoring model from `model_file` into an in-memory map.
   *
   * Each input line is expected to be "label \t feature \t weight";
   * malformed lines are dropped. The result maps a feature to every
   * "label__weight" pair that mentions it, joined with "____" (split again
   * at scoring time in main()).
   *
   * @return feature -> "label1__weight1____label2__weight2____..." map
   */
  def getBaseFeature(sc: org.apache.spark.SparkContext, model_file: String) = {

    // flatMap fuses the former map + filter pair: a malformed line simply
    // contributes nothing.
    val records = sc.textFile(model_file).flatMap { line =>
      val parts = line.trim().split("\t", -1)
      if (parts.length == 3) Some((parts(1), parts(0) + "__" + parts(2))) else None
    }.cache()

    // Log a small sample on the driver for sanity checking.
    records.take(10).foreach(println)
    log.info("model_RDD -------------------------- end")

    // Collapse all entries per feature into one "____"-joined string and
    // collect the whole model onto the driver as a map (it must fit in
    // driver memory; it is broadcast afterwards).
    records.groupByKey().mapValues(_.mkString("____")).collectAsMap()

  }

}
