package pxene

import org.ansj.recognition.impl.FilterRecognition
import org.ansj.splitWord.analysis.NlpAnalysis
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
import org.apache.spark.mllib.classification.NaiveBayes
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.sql.Row
import org.apache.hadoop.fs.Path
import org.apache.hadoop.fs.FileSystem

/**
 * @author zhengyi
 */
object NaiveBayes4Weixin {

  /** One training record: a 4-character category code and the word-segmented article text. */
  case class RawDataRecord(category: String, text: String)

  /**
   * Trains a multinomial Naive Bayes text classifier on WeChat articles stored in the
   * HBase table `t_article_classify` (column `article:content`, category code taken from
   * the first 4 characters of the row key) and saves the model to HDFS.
   *
   * Pipeline: HBase scan -> ansj word segmentation -> Tokenizer -> HashingTF (50k buckets)
   * -> IDF -> LabeledPoint -> NaiveBayes.train.
   *
   * @param args optional; args(0) overrides the HDFS output path for the model
   *             (defaults to "/user/chenjinghui/model2", the original hard-coded path)
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
    val sparkContext = new SparkContext(sparkConf)

    val tableName = "t_article_classify"
    // Model output path, overridable from the command line; default preserves old behavior.
    val modelPath = if (args.nonEmpty) args(0) else "/user/chenjinghui/model2"

    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
    hbaseConf.set("hbase.zookeeper.quorum", "dmp01,dmp02,dmp03,dmp04,dmp05")
    hbaseConf.set(TableInputFormat.INPUT_TABLE, tableName)

    val hbaseRDD = sparkContext.newAPIHadoopRDD(hbaseConf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])

    val sqlContext = new org.apache.spark.sql.SQLContext(sparkContext)
    import sqlContext.implicits._

    // Segment each article with ansj. FilterRecognition is built once per PARTITION
    // (mapPartitions) instead of once per record as before; the commented-out broadcast
    // attempt suggests it is not safely serializable, so per-partition is the safe hoist.
    val segmentRDD = hbaseRDD.mapPartitions { rows =>
      val filter = new FilterRecognition()
      filter.insertStopNatures("w") // drop punctuation tokens
      rows.flatMap { case (rowKey, rowVal) =>
        val rowKeyStr = Bytes.toString(rowKey.get, rowKey.getOffset, rowKey.getLength)
        val content = Bytes.toString(rowVal.getValue(Bytes.toBytes("article"), Bytes.toBytes("content")))
        // Skip malformed rows (short/absent row key or missing content column)
        // instead of letting one bad row crash the whole job.
        if (rowKeyStr == null || rowKeyStr.length < 4 || content == null) {
          None
        } else {
          val code = rowKeyStr.substring(0, 4) // category code = first 4 chars of the row key
          val segment = NlpAnalysis.parse(content).recognition(filter).toStringWithOutNature(" ")
          Some(RawDataRecord(code, segment))
        }
      }
    }.toDF()

    // Split the space-joined segmented text back into a word array.
    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
    val wordsDF = tokenizer.transform(segmentRDD)
    // Print the sampled row (the original discarded take(1)'s result, so nothing was shown).
    println("output1：" + wordsDF.select($"category", $"text", $"words").take(1).mkString)

    // Term frequencies via the hashing trick (50,000 buckets).
    val hashingTF = new HashingTF().setNumFeatures(50000).setInputCol("words").setOutputCol("rawFeatures")
    val featurizedDF = hashingTF.transform(wordsDF)
    println("output2：" + featurizedDF.select($"category", $"words", $"rawFeatures").take(1).mkString)

    // Re-weight term frequencies by inverse document frequency.
    val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
    val idfModel = idf.fit(featurizedDF)
    val rescaledDF = idfModel.transform(featurizedDF)
    println("output3：" + rescaledDF.select($"category", $"features").take(1).mkString)

    // Convert to MLlib LabeledPoint. Keep the TF-IDF vector sparse: the old
    // Vectors.dense(features.toArray) materialized a 50k-element dense array per
    // document, wasting memory — NaiveBayes handles sparse input natively.
    val trainDataRdd = rescaledDF.select($"category", $"features").map {
      case Row(label: String, features: Vector) =>
        LabeledPoint(label.toDouble, features)
    }
    println("output4：" + trainDataRdd.take(1).mkString)

    // Train and persist the model. Remove a stale model directory first so
    // model.save does not fail when the output path already exists
    // (this is what the previously-unused FileSystem/Path imports were for).
    val model = NaiveBayes.train(trainDataRdd, lambda = 1.0, modelType = "multinomial")
    val fs = FileSystem.get(sparkContext.hadoopConfiguration)
    val outPath = new Path(modelPath)
    if (fs.exists(outPath)) fs.delete(outPath, true)
    model.save(sparkContext, modelPath)

    sparkContext.stop()
  }
}