package pxene

import scala.reflect.runtime.universe

import org.ansj.recognition.impl.FilterRecognition
import org.ansj.splitWord.analysis.NlpAnalysis
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.Base64
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.ml.feature.HashingTF
import org.apache.spark.ml.feature.IDF
import org.apache.spark.ml.feature.Tokenizer
import org.apache.spark.mllib.classification.NaiveBayesModel
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions
import org.apache.spark.sql.Row
import org.apache.hadoop.hbase.filter.PageFilter

/**
 * @author zhengyi
 */
/**
 * Batch classification job: reads WeChat articles from the HBase table
 * `t_prod_weixin_art`, segments the Chinese article body with Ansj, builds
 * TF-IDF feature vectors with Spark ML, classifies each article with a
 * pre-trained Naive Bayes model loaded from HDFS, and writes the predicted
 * category for each row key back to the HBase table `t_article_test`.
 */
object ModelUse {

  // One article record. NOTE: `category` actually carries the HBase row key here
  // (it is later used as the output row key), not a training label; `text` is the
  // space-joined segmented article body.
  case class RawDataRecord(category: String, text: String)

  /**
   * Job entry point. Pipeline: HBase scan -> Ansj segmentation -> Tokenizer ->
   * HashingTF (50k buckets) -> IDF -> NaiveBayesModel.predict -> HBase write.
   *
   * @param args unused; all configuration (table names, ZK quorum, model path)
   *             is hard-coded below.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()

    val sparkContext = new SparkContext(sparkConf)
    val sqlContext = new org.apache.spark.sql.SQLContext(sparkContext)
    import sqlContext.implicits._

    val tableName = "t_prod_weixin_art"
    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
    hbaseConf.set("hbase.zookeeper.quorum", "dmp01,dmp02,dmp03,dmp04,dmp05")
    // Add scan filter(s)
    val scan = new Scan()
    //    scan.setFilter(new SingleColumnValueFilter("basic".getBytes, "age".getBytes,
    //      CompareOp.GREATER_OR_EQUAL, Bytes.toBytes(18)))
    // Limit to 10 rows per region server — presumably a debugging/sampling cap.
    // NOTE(review): PageFilter is applied per region, so the total row count can
    // exceed 10 on a multi-region table — confirm this is intended.
    val pageFilter = new PageFilter(10)
    scan.setFilter(pageFilter)
    hbaseConf.set(TableInputFormat.SCAN, convertScanToString(scan))

    // Set the table to read
    hbaseConf.set(TableInputFormat.INPUT_TABLE, tableName)
    // Set the column family to read
    // NOTE(review): when TableInputFormat.SCAN is set to a serialized Scan (above),
    // SCAN_COLUMNS may be ignored by TableInputFormat — verify which one wins.
    hbaseConf.set(TableInputFormat.SCAN_COLUMNS, "info")
    // Read HBase via newAPIHadoopRDD (returns a NewHadoopRDD of (rowkey, Result))
    val hbaseRDD = sparkContext.newAPIHadoopRDD(hbaseConf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])

    // Keep only the Result half of each (key, Result) tuple
    val resRDD = hbaseRDD.map(tuple => tuple._2)

    // (row key, article body) pairs.
    // NOTE(review): getValue returns null if info:article_content is absent, which
    // makes Bytes.toString yield null and would likely NPE in NlpAnalysis.parse
    // below — confirm every row has this column, or filter nulls first.
    val rdd1 = resRDD.map(r => (Bytes.toString(r.getRow), Bytes.toString(r.getValue(Bytes.toBytes("info"), Bytes.toBytes("article_content")))))

    val srcData = rdd1.map {
      x =>
        {
          // Segment the article with Ansj, dropping punctuation (nature "w"),
          // and join the terms with single spaces for the Tokenizer below.
          val filter = new FilterRecognition()
          filter.insertStopNatures("w")
          val segment = NlpAnalysis.parse(x._2).recognition(filter).toStringWithOutNature(" ")
          RawDataRecord(x._1, segment)
        }
    }

    val srcDF = srcData.toDF()

    // Split the pre-segmented text into a word array
    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
    val wordsData = tokenizer.transform(srcDF)
    // Hash words into a 50,000-dimension term-frequency vector.
    // NOTE(review): this must match the feature dimension the model was trained
    // with — confirm against the training job.
    val hashingTF = new HashingTF().setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(50000)
    val featurizedData = hashingTF.transform(wordsData)
    // Compute TF-IDF values
    // NOTE(review): IDF is re-fit on this batch rather than reusing the IDF model
    // from training, so weights differ from training time — verify this is intended.
    val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
    val idfModel = idf.fit(featurizedData)
    val rescaledData = idfModel.transform(featurizedData)

    // Convert to the (rowkey, mllib dense vector) input format the model expects
    val trainDataRdd = rescaledData.select($"category", $"features").map {
      case Row(label: String, features: Vector) =>
        (label, Vectors.dense(features.toArray))
    }
    // Load the pre-trained Naive Bayes model from HDFS
    val model = NaiveBayesModel.load(sparkContext, "/user/chenjinghui/model2")
    // Predict a class for each article; the class id is zero-padded to 4 digits
    // ("0001", "0042", ...) and paired with the original row key.
    val testpredictionAndLabel = trainDataRdd.map(p => (model.predict(p._2).toInt.formatted("%04d"), p._1))

    // Write results back to HBase
    val job = Job.getInstance(hbaseConf)
    val jobConf = job.getConfiguration
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, "t_article_test")
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
    testpredictionAndLabel.map(convert).saveAsNewAPIHadoopDataset(jobConf)

    //    testpredictionAndLabel.foreach(println) // print results
    sparkContext.stop()
  }

  /**
   * Converts a (predictedCategory, rowKey) pair into the (key, Put) tuple that
   * TableOutputFormat expects: row key = original article row key, writing the
   * predicted category into column `article:category`. The ImmutableBytesWritable
   * key is a throwaway — TableOutputFormat ignores it and uses the Put's row.
   */
  def convert(triple: (String, String)) = {
    val p = new Put(Bytes.toBytes(triple._2))
    p.addColumn(Bytes.toBytes("article"), Bytes.toBytes("category"), Bytes.toBytes(triple._1))
    (new ImmutableBytesWritable, p)
  }

  /**
   * Serializes a Scan to the Base64-encoded protobuf string format that
   * TableInputFormat.SCAN requires in the job configuration.
   */
  def convertScanToString(scan: Scan) = {
    val proto = ProtobufUtil.toScan(scan)
    Base64.encodeBytes(proto.toByteArray)
  }

}