package org.nerve.data.mining.spark.mongo

import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import com.mongodb.spark.rdd.partitioner.{DefaultMongoPartitioner, MongoSplitVectorPartitioner}
import org.apache.spark.SparkContext
import org.bson.Document
import org.nerve.data.mining.spark.CommonModeling
import org.slf4j.LoggerFactory

import scala.beans.BeanProperty
import scala.util.control.Breaks._
/**
  * Computes, over the collections already stored in the database, how many
  * contain the given field names, and how those matches are distributed.
  * For example, to get the total number of collections containing a
  * "password" field, the result looks like:
  *   keyword   password
  *   total     100
  *   regions   [
  *               "china":80,
  *               "vietnam":10,
  *               "hong kong":10
  *             ]
  *
  *
  * Created by zengxm on 2016/6/30.
  */
class CollectionLogModeling(sparkContext: SparkContext) extends CommonModeling(sparkContext){
  val logger = LoggerFactory.getLogger(classOf[CollectionLogModeling])

  // Default database name.
  @BeanProperty var db = "mongos"
  // Default collection name.
  @BeanProperty var col = "mongo_collection_log"
  @BeanProperty var host = "mongodb://127.0.0.1:27017"

  // Lazily built, cached read configuration; reset with cleanReadConfig.
  @BeanProperty var readConfig: Option[ReadConfig] = None

  // MongoDB version is >= 3.2: the default partitioner requires 3.2+ server support.
  @BeanProperty var versionGTE3_2 = true
  // Field names that a matching collection's "fieldString" must all contain.
  @BeanProperty var fields = Set[String]()

  /**
    * Returns the cached ReadConfig, building one from the current
    * db/col/host/partitioner settings on first use and caching it.
    */
  private def buildReadConfig: ReadConfig =
    readConfig.getOrElse {
      // Servers older than 3.2 must fall back to MongoSplitVectorPartitioner.
      val partitioner =
        if (versionGTE3_2) DefaultMongoPartitioner else MongoSplitVectorPartitioner
      val rc = new ReadConfig(db, col, Some(host), 1000, partitioner)
      readConfig = Some(rc)
      rc
    }

  /** Drops the cached config and rebuilds it from the current settings. */
  private def cleanReadConfig = {
    readConfig = None
    buildReadConfig
  }

  /** Adds every name in `f` to the set of target field names. */
  def addFields(f: Seq[String]) = fields ++= f
  def addFields(f: Array[String]) = fields ++= f

  /**
    * Counts the documents whose "fieldString" contains every configured
    * field name (case-insensitive), filtering on the Spark side, and logs
    * both the total collection count and the matching count.
    */
  def computeAll = {
    val rdd = MongoSpark.load(sparkContext, buildReadConfig)
    logger.info("%s.%s count is %d".format(db, col, rdd.count()))

    // Copy to a local val so the closure does not capture `this`
    // (which would force serializing the whole modeling object).
    val targetFields = fields
    val targets = rdd.filter { d =>
      // getString returns null when the key is absent; treat that as "".
      val fs = Option(d.getString("fieldString")).getOrElse("").toLowerCase
      // A document matches only if it mentions every target field.
      targetFields.forall(f => fs.contains(f.toLowerCase))
    }

    logger.info("count of contains %s is %d".format(fields.toString(), targets.count()))
  }

  /**
    * Filters with an aggregation pipeline so the matching happens inside
    * MongoDB, equivalent to the mongo shell query:
    * db.mongo_collection_log.aggregate({$match:{$and:[{fieldString:/gps/i},{fieldString:/password/i}]}})
    * Logs the total match count and the per-host distribution.
    */
  def compute = {
    val rdd = MongoSpark.load(sparkContext, buildReadConfig)
    // One case-insensitive regex clause per field, AND-ed together in $match.
    val fieldFilter = fields.map("{fieldString:/%s/i}".format(_)).mkString(",")
    val aggregatedRdd = rdd.withPipeline(Seq(Document.parse("{$match:{$and:[%s]}}".format(fieldFilter))))

    val total = aggregatedRdd.count
    logger.info("count of contains %s is %d".format(fields.toString(), total))

    aggregatedRdd.foreach(d => println(d.toJson))

    // Per-host counts. Collect to the driver BEFORE filling the local map:
    // mutating a driver-side map inside foreach would happen on executors
    // and the updates would be silently lost.
    val hostCounts = aggregatedRdd.map(d => (d.getString("host"), 1)).reduceByKey(_ + _)
    val hostM = new java.util.HashMap[String, Any]()
    hostM.put("total", total)
    hostCounts.collect().foreach(d => hostM.put(d._1, d._2))
    logger.info("ip : %s".format(hostM.toString))

    logger.info("count of contains %s is %d".format(fields.toString(), total))
  }
}
