package com.spark.rdd

import org.ansj.splitWord.analysis.ToAnalysis
import org.apache.spark.ml.feature.Tokenizer
import org.apache.spark.mllib.classification.{LogisticRegressionWithLBFGS, NaiveBayes, SVMWithSGD}
import org.apache.spark.mllib.feature.{HashingTF, IDF}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.RandomForest
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.jsoup.Jsoup

/**
  * Created by TRS on 2017/6/20.
  */
object svnType {

  /**
    * Entry point: trains a random-forest text classifier on TF-IDF features
    * of pre-segmented news lines ("label,text"), then evaluates it on a
    * held-out test file and prints the number of correct predictions.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {

    // Create the SparkSession (local mode, fixed warehouse dir).
    val sparkSession = SparkSession.builder
      .config("spark.sql.warehouse.dir", "D:\\WorkSpace\\spark\\spark-learning\\spark-warehouse")
      .master("local")
      .appName("spark session example")
      .getOrCreate()

    // Load sample data in LIBSVM format. NOTE(review): this dataset is only
    // printed below; the model is trained on the news data, not on this.
    val data = MLUtils.loadLibSVMFile(sparkSession.sparkContext, "D:\\openSource\\spark-2.1.1-bin-hadoop2.7\\data\\mllib\\sample_libsvm_data.txt")

    // Split data into training (60%) and test (40%).
    val splits = data.randomSplit(Array(0.6, 0.4), seed = 11L)
    val training = splits(0).cache()
    training.take(10).foreach(println)

    // Labels kept for both training and test sets (single filter definition
    // instead of two duplicated predicates).
    val keptLabels = Set("1", "2", "4")

    // Training corpus: each line is "label,segmentedText".
    val trainRdd = sparkSession.sparkContext.textFile("E:\\file\\res\\allType.txt").map { line =>
      val fields = line.split(",")
      (fields(0), fields(1))
    }.filter(x => keptLabels.contains(x._1))

    println(trainRdd.count())

    // TF-IDF feature extraction.
    val trainTFDF = toTFIDF(sparkSession, trainRdd)

    // Labeled points: (label, text, tf-idf vector) -> LabeledPoint.
    val trainPoint = trainTFDF.map { x =>
      LabeledPoint(x._1.toDouble, x._3)
    }

    trainPoint.take(10).foreach(println)

    // Random-forest training parameters.
    // Number of target classes.
    val numClasses = 6
    // Empty categoricalFeaturesInfo: all features treated as continuous.
    val categoricalFeaturesInfo = Map[Int, Int]()
    // Number of trees in the forest.
    val numTrees = 3
    // Feature-subset sampling strategy; "auto" lets the algorithm choose.
    val featureSubsetStrategy = "auto"
    // Impurity measure used for splits.
    val impurity = "gini"
    // Maximum tree depth.
    val maxDepth = 4
    // Maximum number of bins used when discretising features.
    val maxBins = 32
    // Train the classifier. Alternatives tried previously: NaiveBayes.train,
    // SVMWithSGD.train, LogisticRegressionWithLBFGS.
    val model = RandomForest.trainClassifier(trainPoint, numClasses, categoricalFeaturesInfo,
      numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins)

    // Optionally persist / reload the trained model:
    // model.save(sparkSession.sparkContext, "E:\\model")

    // Test corpus: same "label,segmentedText" format and label filter.
    val testRdd = sparkSession.sparkContext.textFile("E:\\file\\res\\test.txt").map { line =>
      val fields = line.split(",")
      (fields(0), fields(1))
    }.filter(x => keptLabels.contains(x._1))

    // TF-IDF features for the test set.
    val testrainTFDF = toTFIDF(sparkSession, testRdd)

    // Predict: (true label, predicted label).
    val res = testrainTFDF.map(x => (x._1, model.predict(x._3)))

    // News id / class pairs.
    res.foreach(x => println(x._1 + " " + x._2))
    // Total number of test documents.
    val allCount = res.count()
    // Correctly classified documents (value comparison, not boxed equals).
    val find = res.filter(x => x._1.toDouble == x._2)
    find.foreach(x => println(x._1 + " " + x._2))
    // Historical result: 8856 correct out of 11533.
    println(find.count() + " " + allCount)
  }


  /**
    * 加载测试json新闻数据
    *
    * @param sparkSession
    * @param path
    * @return
    */
  /**
    * Loads JSON news data and produces (newsId, segmentedText) pairs, where
    * the text is extracted from HTML and word-segmented with Ansj.
    *
    * @param sparkSession active SparkSession used to read the JSON file
    * @param path         path of the JSON news file
    * @return RDD of (newsId, space-separated segmented words)
    */
  def loadTestData(sparkSession: SparkSession, path: String) = {
    val df = sparkSession.read.json(path)
    df.printSchema()
    df.createOrReplaceTempView("news")

    // Column order fixes the positional indexes used below:
    // 0=author 1=body 2=is_topic 3=keywords 4=newsid 5=pub_time 6=source
    // 7=sub_title 8=title 9=top_title 10=topicurl
    val sql = "select author,body,is_topic,keywords,newsid,pub_time,source,sub_title,title,top_title,topicurl from news"

    // (newsId without its leading character, title, plain text from HTML),
    // keeping rows with a non-empty title and more than 200 chars of text.
    // NOTE(review): index 6 is the "source" column, not "body" (index 1) —
    // confirm this is really the intended HTML column.
    val rdd = sparkSession.sql(sql).rdd.map(row =>
      (
        row.getString(4).substring(1).toLong,
        row.getString(8),
        getTextFromTHML(row.getString(6))
      )
    ).filter(x => (!x._2.equals("") && !x._3.equals("") && x._3.length > 200))

    // Segment the text with Ansj and join terms with single spaces.
    // StringBuilder avoids the O(n^2) cost of repeated `string +=`.
    val newsRdd = rdd.map(x => {
      val words = ToAnalysis.parse(x._3).getTerms
      val sb = new StringBuilder
      val size = words.size()
      for (i <- 0 until size) {
        sb.append(words.get(i).getName).append(' ')
      }
      (x._1.toString, sb.toString)
    })

    newsRdd
  }


  /**
    * 对RDD新闻进行TF-IDF特征计算
    *
    * @param rdd
    * @return
    */
  /**
    * Computes TF-IDF features for an RDD of (category, segmentedText) pairs.
    *
    * @param sparkSession active SparkSession
    * @param rdd          (category, space-separated words) pairs
    * @return RDD of (category, text, tf-idf vector)
    */
  def toTFIDF(sparkSession: SparkSession, rdd: RDD[Tuple2[String, String]]) = {

    val rowRdd = rdd.map(x => Row(x._1, x._2))

    val schema = StructType(
      Seq(
        StructField("category", StringType, true)
        , StructField("text", StringType, true)
      )
    )

    // Turn the raw RDD into a DataFrame so the ML Tokenizer can be applied.
    val srcDF = sparkSession.createDataFrame(rowRdd, schema)
    srcDF.createOrReplaceTempView("news")

    srcDF.select("category", "text").take(2).foreach(println)

    // Split the pre-segmented text on whitespace into a "words" column.
    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
    val wordsData = tokenizer.transform(srcDF)

    wordsData.select("category", "text", "words").take(2).foreach(println)

    // Hash each word into a 2^18-dimensional term-frequency vector.
    val hashingTF = new HashingTF(Math.pow(2, 18).toInt)

    // (category, text, tf vector). Cached because it is traversed twice:
    // once for the IDF fit and once for the final IDF transform. The
    // original code hashed the whole corpus a second time for the fit.
    val tfDF1 = wordsData.rdd.map(row => {
      val words = row.getSeq(2)
      (row.getString(0), row.getString(1), hashingTF.transform(words))
    }).cache()

    // TF vectors only, derived from tfDF1 instead of re-hashing wordsData.
    val tfDF = tfDF1.map(_._3)

    // Fit IDF over the whole corpus, then rescale each TF vector.
    val idf = new IDF().fit(tfDF)
    val num_idf_pairs = tfDF1.map(x => {
      (x._1, x._2, idf.transform(x._3))
    })

    num_idf_pairs.take(10).foreach(println)

    num_idf_pairs
  }


  /**
    * 抽取HTML中文字
    *
    * @param htmlStr
    * @return
    */
  /**
    * Extracts the visible text from an HTML string and normalizes its
    * whitespace to single ASCII spaces.
    *
    * Each space-like character (including Unicode space separators such as
    * non-breaking spaces, which the `" +"` regex alone would miss) is first
    * mapped to ' '; runs of spaces are then collapsed and the ends trimmed.
    *
    * @param htmlStr raw HTML markup
    * @return plain text with single-space separation, trimmed
    */
  def getTextFromTHML(htmlStr: String): String = {
    val raw = Jsoup.parse(htmlStr).text()
    // Map every whitespace / space-separator character to a plain space.
    val normalized = raw.map { c =>
      if (Character.isSpaceChar(c) || Character.isWhitespace(c)) ' ' else c
    }
    normalized.replaceAll(" +", " ").trim
  }
}
