package com.pxene.dmp.textcategorization

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.ansj.recognition.impl.FilterRecognition
import org.ansj.splitWord.analysis.NlpAnalysis
import org.apache.spark.ml.feature.Tokenizer
import org.apache.spark.ml.feature.HashingTF
import org.apache.spark.ml.feature.IDF
import org.apache.spark.sql.Row
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.classification.NaiveBayes
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.linalg.Vectors
/**
 * @author zhengyi
 */
object NaiveBayes2Train {

  /** One training document: `category` is the 3-character class code taken
    * from the head of the HBase row key; `text` is the whitespace-joined
    * word segmentation of the article content. */
  case class RawDataRecord(category: String, text: String)

  /**
    * Trains a multinomial Naive Bayes text classifier and saves it.
    *
    * Pipeline: read articles from HBase table `t_text_categorization`,
    * segment the content with ansj `NlpAnalysis` (punctuation filtered),
    * tokenize, hash words into 500,000-dimensional term-frequency vectors,
    * re-weight with IDF, then train `NaiveBayes` on the TF-IDF features.
    *
    * @param args args(0) = HDFS/path where the trained model is saved
    */
  def main(args: Array[String]): Unit = {
    println("<=ee-debug=>the name of running job is NaiveBayes2Train")

    if (args.length != 1) {
      println("<=ee-debug=>args length must be 1")
      return
    }
    val modelPath = args(0)
    val sparkConf = new SparkConf().setAppName("naiveBayes2Train").set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sparkContext = new SparkContext(sparkConf)

    // Source table: row key starts with a 3-char category code; the article
    // body lives in column family "article", qualifier "content".
    val tableName = "t_text_categorization"
    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
    hbaseConf.set("hbase.zookeeper.quorum", "dmp01,dmp02,dmp03,dmp04,dmp05")
    hbaseConf.set(TableInputFormat.INPUT_TABLE, tableName)

    val hbaseRDD = sparkContext.newAPIHadoopRDD(hbaseConf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])

    val sqlContext = new org.apache.spark.sql.SQLContext(sparkContext)
    import sqlContext.implicits._

    // Drop punctuation ("w" nature) during segmentation. The filter is
    // broadcast once instead of being serialized into every task closure.
    val filter = new FilterRecognition()
    filter.insertStopNatures("w")
    val broadcastFilter = sparkContext.broadcast(filter)

    // Segment each article into space-separated words, keyed by the
    // category code extracted from the row key.
    val segmentRDD = hbaseRDD.map {
      case (rowKey, rowVal) => {
        val rowKeyStr = Bytes.toString(rowKey.get, rowKey.getOffset, rowKey.getLength)
        val code = rowKeyStr.substring(0, 3)
        val content = Bytes.toString(rowVal.getValue(Bytes.toBytes("article"), Bytes.toBytes("content")))
        val segment = NlpAnalysis.parse(content).recognition(broadcastFilter.value).toStringWithOutNature(" ")

        RawDataRecord(code, segment)
      }
    }.toDF()

    // Split the segmented string back into a word array.
    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
    val wordsDF = tokenizer.transform(segmentRDD)
    println("output1：")
    // take(1) only forces evaluation of the stage as a debug checkpoint;
    // the result is intentionally discarded.
    wordsDF.select($"category", $"text", $"words").take(1)

    // Hash each word into a 500,000-bucket term-frequency vector.
    val hashingTF = new HashingTF().setNumFeatures(500000).setInputCol("words").setOutputCol("rawFeatures")
    val featurizedDF = hashingTF.transform(wordsDF)
    println("output2：")
    featurizedDF.select($"category", $"words", $"rawFeatures").take(1)

    // Re-weight term frequencies with inverse document frequency (TF-IDF).
    val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
    val idfModel = idf.fit(featurizedDF)
    val rescaledDF = idfModel.transform(featurizedDF)
    println("output3：")
    rescaledDF.select($"category", $"features").take(1)

    // Convert to MLlib's NaiveBayes input format. Keep the TF-IDF vector
    // as-is: HashingTF/IDF produce sparse vectors, and densifying a
    // 500,000-dimensional vector per document (the previous
    // Vectors.dense(features.toArray)) allocates ~4 MB per record and can
    // OOM the executors. NaiveBayes.train accepts sparse vectors directly.
    // NOTE(review): label.toDouble assumes the 3-char row-key code is
    // numeric — confirm against the ingestion job.
    val trainDataRdd = rescaledDF.select($"category", $"features").map {
      case Row(label: String, features: Vector) =>
        LabeledPoint(label.toDouble, features)
    }
    println("output4：")
    trainDataRdd.take(1)

    // Train the multinomial model with Laplace smoothing and persist it.
    val model = NaiveBayes.train(trainDataRdd, lambda = 1.0, modelType = "multinomial")
    model.save(sparkContext, modelPath)

    sparkContext.stop()
  }
}