package com.pxene.dmp.textcategorization

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.ansj.recognition.impl.FilterRecognition
import org.ansj.splitWord.analysis.NlpAnalysis
import org.apache.spark.ml.feature.Tokenizer
import org.apache.spark.ml.feature.HashingTF
import org.apache.spark.ml.feature.IDF
import org.apache.spark.sql.Row
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.classification.NaiveBayes
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.ml.classification.NaiveBayesParams
import org.apache.spark.mllib.classification.NaiveBayesModel
import org.apache.hadoop.hbase.client.Scan
import java.text.SimpleDateFormat
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.Base64
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.client.Put

/**
 * Batch job: classify one day of WeChat articles stored in HBase with a
 * pre-trained MLlib Naive Bayes model, and write the predicted category
 * (code, name, article length) back to HBase.
 *
 * Usage: NaiveBayes2Use <yyyyMMdd date> <model path>
 */
object NaiveBayes2Use {

  /**
   * One article prepared for featurization.
   *
   * @param category here reused to carry the HBase rowkey of the article
   * @param length   character length of the raw article text
   * @param text     space-separated word segments produced by ansj
   */
  case class RawDataRecord(category: String, length: Int, text: String)

  /** Predicted 4-digit category code -> human-readable category name. */
  val code2Name = Map("0101" -> "社会",
    "0102" -> "国际",
    "0103" -> "国内",
    "0104" -> "军事",
    "0201" -> "足球",
    "0202" -> "篮球",
    "0203" -> "综合",
    "0301" -> "服饰",
    "0302" -> "美容",
    "0303" -> "美体",
    "0401" -> "情感",
    "0402" -> "八卦",
    "0403" -> "微整形",
    "0501" -> "养生",
    "0502" -> "疾病",
    "0503" -> "医药",
    "0601" -> "产前",
    "0602" -> "产后",
    "0701" -> "股票",
    "0702" -> "基金",
    "0703" -> "理财",
    "0704" -> "银行",
    "0705" -> "保险",
    "0706" -> "外汇",
    "0801" -> "电影",
    "0802" -> "电视",
    "0803" -> "音乐",
    "0901" -> "幼教",
    "0902" -> "中学",
    "0903" -> "大学",
    "0904" -> "出国",
    "1001" -> "国内游",
    "1002" -> "出境游",
    "1100" -> "彩票",
    "1201" -> "创业",
    "1202" -> "数码",
    "1301" -> "购车",
    "1302" -> "用车",
    "1401" -> "购房",
    "1402" -> "装修",
    "1500" -> "游戏")

  // Source table: raw WeChat article content.
  val articleTable = "t_prod_weixin_art"
  val articleFamily = "info"
  val articleColumn = "article_content"

  // Sink table: predicted category per article rowkey.
  val categoryTable = "t_article_category"
  val categoryFamily = "info"
  val categoryColumnsCode = "categoryCode"
  val categoryColumnsName = "categoryName"
  val categoryColumnsLen = "size"

  /**
   * Entry point.
   *
   * args(0): ingestion date (yyyyMMdd) — only articles written to HBase within
   *          that 24-hour window are classified.
   * args(1): HDFS/FS path of the saved mllib NaiveBayesModel.
   */
  def main(args: Array[String]): Unit = {
    println("<=ee-debug=>the name of running job is NaiveBayes2Use")

    if (args.length != 2) {
      println("<=ee-debug=>args length must be 2")
      return
    }
    val date = args(0)
    if (!date.matches("[\\d]{8}")) {
      println("<=ee-debug=>args(0) date is invalid")
      return
    }
    val modelPath = args(1)

    val sparkConf = new SparkConf().setAppName("naiveBayes2Use").set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sparkContext = new SparkContext(sparkConf)

    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
    hbaseConf.set("hbase.zookeeper.quorum", "dmp01,dmp02,dmp03,dmp04,dmp05")
    hbaseConf.set(TableInputFormat.INPUT_TABLE, articleTable)
    // Restrict the scan to the article column family.
    hbaseConf.set(TableInputFormat.SCAN_COLUMNS, articleFamily)
    // Build a Scan limited to cells written on the requested day, so that only
    // one day's worth of newly ingested articles is classified per run.
    val scan = new Scan()
    // Use the already-validated `date` (was re-parsing args(0) — same value,
    // but a single source of truth avoids divergence if validation changes).
    val executeTime = new SimpleDateFormat("yyyyMMdd").parse(date).getTime
    // 24h window; use a Long literal so the day-in-millis term is unambiguous.
    scan.setTimeRange(executeTime, executeTime + 24L * 60 * 60 * 1000)
    val proto = ProtobufUtil.toScan(scan)
    hbaseConf.set(TableInputFormat.SCAN, Base64.encodeBytes(proto.toByteArray))

    val hbaseRDD = sparkContext.newAPIHadoopRDD(hbaseConf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])

    val sqlContext = new org.apache.spark.sql.SQLContext(sparkContext)
    import sqlContext.implicits._

    // Drop punctuation ("w" nature) during word segmentation; broadcast the
    // filter so each executor deserializes it once instead of per record.
    val filter = new FilterRecognition()
    filter.insertStopNatures("w")
    val broadcastFilter = sparkContext.broadcast(filter)

    // (rowkey, article text) -> ansj NLP segmentation -> RawDataRecord rows.
    // Articles that segment to zero terms are dropped.
    val segmentRDD = hbaseRDD.map(tuple => tuple._2).map(
      r => (Bytes.toString(r.getRow), Bytes.toString(r.getValue(Bytes.toBytes(articleFamily), Bytes.toBytes(articleColumn)))))
      .map {
        r =>
          val result = NlpAnalysis.parse(r._2).recognition(broadcastFilter.value)
          (r._1, r._2.length(), result)
      }.filter(x => x._3.getTerms.size() > 0).map {
        r =>
          {
            val segment = r._3.toStringWithOutNature(" ")
            RawDataRecord(r._1, r._2, segment)
          }
      }.toDF()

    // Split the pre-segmented text into a word array.
    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
    val wordsData = tokenizer.transform(segmentRDD)
    // NOTE(review): numFeatures must match the value used when the model was
    // trained, otherwise feature indices will not line up — confirm 500000.
    val hashingTF = new HashingTF().setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(500000)
    val featurizedData = hashingTF.transform(wordsData)
    // Compute TF-IDF weights over this batch.
    val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
    val idfModel = idf.fit(featurizedData)
    val rescaledData = idfModel.transform(featurizedData)
    // Convert ml.Vector rows to the mllib dense vectors the model expects.
    val classifyRDD = rescaledData.select($"category", $"length", $"features").map {
      case Row(label: String, length: Int, features: Vector) =>
        (label, length, Vectors.dense(features.toArray))
    }
    // Load the pre-trained Naive Bayes model.
    val model = NaiveBayesModel.load(sparkContext, modelPath)

    val resultRDD = classifyRDD.map {
      p =>
        val code = model.predict(p._3).toInt.formatted("%04d")
        // BUG FIX: code2Name.get(code).get previously threw
        // NoSuchElementException on any predicted label missing from the map,
        // killing the whole job. Fall back to the raw code instead.
        val name = code2Name.getOrElse(code, code)
        val article = p._1
        val length = p._2
        (article, code, name, length)
    }
    // Write (code, name, length) for each article rowkey back to HBase.
    val job = Job.getInstance(hbaseConf)
    val jobConf = job.getConfiguration
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, categoryTable)
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
    resultRDD.map(triple => {
      val put = new Put(Bytes.toBytes(triple._1))
      put.addColumn(Bytes.toBytes(categoryFamily), Bytes.toBytes(categoryColumnsCode), Bytes.toBytes(triple._2))
      put.addColumn(Bytes.toBytes(categoryFamily), Bytes.toBytes(categoryColumnsName), Bytes.toBytes(triple._3))
      put.addColumn(Bytes.toBytes(categoryFamily), Bytes.toBytes(categoryColumnsLen), Bytes.toBytes(triple._4))
      (new ImmutableBytesWritable, put)
    }).saveAsNewAPIHadoopDataset(jobConf)

    sparkContext.stop()
  }
}