package com.spark.rdd


import org.ansj.splitWord.analysis.ToAnalysis
import org.apache.spark.ml.feature.Tokenizer
import org.apache.spark.mllib.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.mllib.feature.{ChiSqSelector, HashingTF, IDF}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.jsoup.Jsoup


/**
  * News category codes used by the classifier:
  * 0 汽车 (cars)
  * 1 财经 (finance)
  * 2 IT
  * 3 健康 (health)
  * 4 体育 (sports)
  * 5 旅游 (travel)
  * 6 教育 (education)
  * 7 招聘 (jobs)
  * 8 文化 (culture)
  * 9 军事 (military)
  * Created by TRS on 2017/6/19.
  */
object newstype {

  /**
    * End-to-end train/evaluate pipeline:
    * load labelled text -> TF-IDF -> chi-squared feature selection ->
    * NaiveBayes training -> evaluation on a held-out file.
    *
    * Only categories "0" and "1" are kept for this binary experiment.
    */
  def main(args: Array[String]): Unit = {

    // Local Spark session; paths below are Windows-specific — adjust per environment.
    val sparkSession = SparkSession.builder
      .config("spark.sql.warehouse.dir", "D:\\WorkSpace\\spark\\spark-learning\\spark-warehouse")
      .master("local")
      .appName("spark session example")
      .getOrCreate()

    // Training corpus: one "label,space-segmented-text" record per line.
    val trainRdd = sparkSession.sparkContext.textFile("E:\\file\\res\\allType.txt").map { line =>
      val data = line.split(",")
      (data(0), data(1))
    }.filter(x => x._1.equals("0") || x._1.equals("1"))

    println(trainRdd.count())

    // TF-IDF features: (label, text, featureVector)
    val trainTFDF = toTFIDF(sparkSession, trainRdd)

    // Chi-squared selection: keep the 50 most discriminative hashed features.
    val discretizedData = trainTFDF.map { lp =>
      LabeledPoint(lp._1.toDouble, lp._3)
    }
    val selector = new ChiSqSelector(50)
    val transformer = selector.fit(discretizedData)
    val filteredData = discretizedData.map { lp =>
      LabeledPoint(lp.label, transformer.transform(lp.features))
    }

    // Train Naive Bayes on the 50-dimensional selected features.
    val model = NaiveBayes.train(filteredData)

    // Persist / reload the model if needed:
    // model.save(sparkSession.sparkContext, "E:\\model")
    // val model = NaiveBayesModel.load(sparkSession.sparkContext, "E:\\model")

    // Test corpus, same "label,text" format and the same two classes.
    val testRdd = sparkSession.sparkContext.textFile("E:\\file\\res\\test.txt").map { line =>
      val data = line.split(",")
      (data(0), data(1))
    }.filter(x => x._1.equals("0") || x._1.equals("1"))

    // NOTE(review): toTFIDF fits a *separate* IDF model on the test corpus;
    // strictly, the training IDF model should be reused here — confirm intent.
    val testTFDF = toTFIDF(sparkSession, testRdd)

    // FIX: the model was trained on ChiSqSelector-reduced vectors (50 dims),
    // so the same transformer must be applied to the test features before
    // predicting; the original passed the raw 2^18-dim TF-IDF vectors, whose
    // dimensionality does not match the trained model.
    val res = testTFDF.map { x =>
      (x._1, model.predict(transformer.transform(x._3)))
    }

    // (true label, predicted label) per document
    res.foreach(x => println(x._1 + " " + x._2))
    // Total number of test documents.
    val allCount = res.count()
    // Correctly classified documents.
    val find = res.filter(x => x._1.toDouble == x._2)
    find.foreach(x => println(x._1 + " " + x._2))
    // e.g. "8856 11533" = correct vs. total
    println(find.count() + " " + allCount)
  }

  /**
    * Loads JSON news records and turns them into (newsId, segmentedText) pairs
    * ready for [[toTFIDF]].
    *
    * @param sparkSession active session used to read and query the JSON
    * @param path         path to the JSON news file
    * @return RDD of (newsId as String, Ansj-segmented text joined by spaces)
    */
  def loadTestData(sparkSession: SparkSession, path: String) = {
    val df = sparkSession.read.json(path)
    df.printSchema()
    df.createOrReplaceTempView("news")

    val sql = "select author,body,is_topic,keywords,newsid,pub_time,source,sub_title,title,top_title,topicurl from news"

    // Column order: 0 author, 1 body, 2 is_topic, 3 keywords, 4 newsid,
    // 5 pub_time, 6 source, 7 sub_title, 8 title, 9 top_title, 10 topicurl.
    // NOTE(review): index 6 is `source`; if the article *body* was intended as
    // the HTML payload, index 1 should be used instead — confirm with the data.
    val rdd = sparkSession.sql(sql).rdd.map(row =>
      (
        row.getString(4).substring(1).toLong, // newsid, leading character dropped
        row.getString(8),                     // title
        getTextFromTHML(row.getString(6))     // HTML stripped to plain text
      )
    ).filter(x => !x._2.equals("") && !x._3.equals("") && x._3.length > 200)

    // Segment with Ansj and re-join tokens with single spaces, since the
    // Tokenizer used downstream splits on whitespace.
    rdd.map { x =>
      val terms = ToAnalysis.parse(x._3).getTerms
      val segmented = (0 until terms.size()).map(i => terms.get(i).getName + " ").mkString
      (x._1.toString, segmented)
    }
  }

  /**
    * Computes TF-IDF features for (category, text) records.
    *
    * @param sparkSession active session, used to build the intermediate DataFrame
    * @param rdd          (category label, whitespace-segmented text) pairs
    * @return RDD of (category, text, TF-IDF vector of dimension 2^18)
    */
  def toTFIDF(sparkSession: SparkSession, rdd: RDD[Tuple2[String, String]]) = {

    val rowRdd = rdd.map(x => Row(x._1, x._2))

    val schema = StructType(
      Seq(
        StructField("category", StringType, true)
        , StructField("text", StringType, true)
      )
    )

    // Wrap the RDD as a DataFrame so the ML Tokenizer can be applied.
    val srcDF = sparkSession.createDataFrame(rowRdd, schema)
    srcDF.createOrReplaceTempView("news")

    srcDF.select("category", "text").take(2).foreach(println)

    // Split the pre-segmented text on whitespace into a "words" array column.
    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
    val wordsData = tokenizer.transform(srcDF)

    wordsData.select("category", "text", "words").take(2).foreach(println)

    // Hash each token into a 2^18-bucket term-frequency vector.
    val hashingTF = new HashingTF(Math.pow(2, 18).toInt)

    // Hash once and cache: the original computed the same TF vectors in two
    // separate RDD passes (one to fit IDF, one to build the output).
    val tf = wordsData.rdd.map { row =>
      (row.getString(0), row.getString(1), hashingTF.transform(row.getSeq[String](2)))
    }.cache()

    // Fit IDF on this corpus and rescale the TF vectors.
    val idf = new IDF().fit(tf.map(_._3))
    val num_idf_pairs = tf.map(x => (x._1, x._2, idf.transform(x._3)))

    num_idf_pairs.take(10).foreach(println)

    num_idf_pairs
  }

  /**
    * Extracts plain text from an HTML string and normalizes whitespace:
    * every Unicode space/whitespace character becomes ' ', runs of spaces are
    * collapsed to one, and the result is trimmed.
    *
    * (Name keeps the original "THML" spelling for caller compatibility.)
    *
    * @param htmlStr raw HTML markup
    * @return cleaned plain text
    */
  def getTextFromTHML(htmlStr: String): String = {
    val text = Jsoup.parse(htmlStr).text()
    // Char-wise normalization replaces the original mutable StringBuilder loop.
    val normalized = text.map { c =>
      if (Character.isSpaceChar(c) || Character.isWhitespace(c)) ' ' else c
    }
    normalized.replaceAll(" +", " ").trim
  }

}
