package com.qing

import java.util

import com.huaban.analysis.jieba.JiebaSegmenter
import com.mongodb.spark.MongoSpark
import com.mongodb.spark.rdd.MongoRDD

import scala.io.Source
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
import org.apache.spark.ml.clustering._
import org.apache.spark.ml.linalg.SparseVector
import org.apache.spark.{SparkConf, SparkContext}
import org.codehaus.jettison.json.{JSONArray, JSONObject}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SQLContext}
import org.bson.Document

import scala.collection.JavaConversions._
import scala.collection.mutable
import org.apache.spark.mllib.feature.{HashingTF => MllibHashingTF}

/**
  * Created by wuliao on 2017/12/19.
  */
object Index {
  def main(args: Array[String]): Unit = {
    // Suppress Spark's verbose INFO/WARN logging so console output stays readable.
    Logger.getLogger("org").setLevel(Level.ERROR)

    // NOTE(review): MongoDB URI is hard-coded; consider taking it from args or config.
    val sparkConf = new SparkConf()
      .set("spark.mongodb.input.uri",
        "mongodb://175.102.18.112:27018/OpenCollege_kw.art_syn")
    val sc = new SparkContext("local", "simple-ml", sparkConf)

    try {
      val sqlCtx = new SQLContext(sc)

      // Load the Mongo collection and serialise each Document to a JSON string.
      val rdd = MongoSpark.load(sc)
      val result = doc2Json(rdd)

      // Keep only the fields needed for feature extraction
      // (presumably "articy" holds the article text — TODO confirm field name).
      val params = Array("_id", "articy")
      val filterRdd = filterParams(params, result)

      // Segment, remove stop words and compute TF-IDF features.
      // (LDA/K-means steps were previously dead code; run lda(tfidf(...)) here
      // once topic modelling is actually wanted.)
      tfidf(sqlCtx, filterRdd)
    } finally {
      // Always release the SparkContext, even if the pipeline fails.
      sc.stop()
    }
  }


  /**
    * Serialises every BSON [[Document]] in the RDD to a JSON string.
    *
    * Each top-level key/value pair of the document is copied into a fresh
    * [[JSONObject]], which is then rendered as text.
    *
    * @param rdd Mongo documents to convert
    * @return one JSON string per input document
    */
  def doc2Json(rdd: MongoRDD[Document]): RDD[String] = {
    rdd.map(doc => {
      val json = new JSONObject()
      // JavaConversions (imported at file level) makes the Java entry set iterable.
      for (entry <- doc.entrySet()) {
        json.put(entry.getKey, entry.getValue)
      }
      json.toString
    })
  }

  /**
    * Dispatches on the algorithm described by a JSON parameter string,
    * e.g. {"name": "LDA", ...}.
    *
    * NOTE(review): this method looks unfinished — the "LDA" branch reads an
    * int under an empty key and discards the result. Behaviour kept as-is
    * pending clarification; only the non-exhaustive match is fixed.
    *
    * @param params JSON document with at least a "name" field
    * @throws IllegalArgumentException if the algorithm name is not supported
    */
  def dealAlgorithm(params: String): Unit = {
    val obj = new JSONObject(params)
    val name = obj.getString("name")

    name match {
      case "LDA" =>
        // TODO: the key is empty — presumably a real parameter name (e.g. "k")
        // was intended here; the value is currently unused.
        obj.getInt("")
      case other =>
        // Fail fast with a meaningful error instead of a bare MatchError.
        throw new IllegalArgumentException(s"Unsupported algorithm: $other")
    }
  }


  /**
    * Projects each JSON record down to the given set of keys.
    *
    * @param params keys to retain in every record
    * @param rdd    RDD of JSON strings
    * @return RDD of JSON strings containing only the requested keys
    */
  def filterParams(params: Array[String], rdd: RDD[String]): RDD[String] = {
    rdd.map { record =>
      val source = new JSONObject(record)
      // Fold the selected keys into a fresh JSON object.
      val projected = params.foldLeft(new JSONObject()) { (acc, key) =>
        acc.put(key, source.get(key))
        acc
      }
      projected.toString
    }
  }

  var mapWords: Map[Int, String] = Map[Int, String]();

  /**
    * Builds a TF-IDF feature DataFrame from an RDD of JSON records.
    *
    * Each record's non-id string fields are segmented with Jieba, stop words
    * are removed, and the resulting bag of words is hashed into term
    * frequencies ("rawFeatures") and rescaled by IDF ("features").
    *
    * Side effect: fills [[mapWords]] with a hash-index -> word lookup so that
    * feature indices can be translated back to terms.
    *
    * Fixes vs. the original: the stop-word file was re-read from disk for
    * every record inside the map closure and never closed; two debug-only
    * full traversals of the data (vocabulary println and a no-op count) have
    * been removed.
    *
    * @param sqlCtx SQL context used to create the DataFrame
    * @param rdd    JSON records; the "_id" field identifies a document
    * @return DataFrame with columns id, text, words, rawFeatures, features
    */
  def tfidf(sqlCtx: SQLContext, rdd: RDD[String]): DataFrame = {

    // Load the stop-word list once on the driver and close the handle.
    val stopSource = Source.fromFile("./src/main/resources/stop_word.txt")
    val stopWords: Set[String] =
      try stopSource.getLines().toSet
      finally stopSource.close()

    val preRdd = rdd.map { record =>
      val obj = new JSONObject(record)
      val jieba = new JiebaSegmenter()
      val tokens = new util.ArrayList[String]()

      // Segment every field except id-like ones (which carry no text).
      val keys = obj.keys()
      while (keys.hasNext) {
        val key = keys.next().asInstanceOf[String]
        if (!key.toLowerCase.contains("id")) {
          tokens.addAll(jieba.sentenceProcess(obj.getString(key)))
        }
      }

      // Drop stop words and join the remaining tokens into one text column.
      val cleaned = tokens.filterNot(stopWords.contains)
      (obj.optString("_id").toString, cleaned.mkString(" "))
    }.cache()

    val textDF = sqlCtx.createDataFrame(preRdd).toDF("id", "text")
    val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
    val wordsData = tokenizer.transform(textDF)

    // mllib's HashingTF with the same bucket count (2^18, the ml default) uses
    // the same hash, so these indices line up with the "rawFeatures" vector.
    val mllibHashingTF = new MllibHashingTF(262144)
    mapWords = wordsData.select("words").rdd
      .map(row => row.getAs[mutable.WrappedArray[String]](0))
      .flatMap(x => x)
      .map(w => (mllibHashingTF.indexOf(w), w))
      .collect()
      .toMap

    // Raw term frequencies, then rescale by inverse document frequency.
    val hashingTF = new HashingTF().setInputCol("words").setOutputCol("rawFeatures")
    val featurizedData = hashingTF.transform(wordsData)

    val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
    val idfModel = idf.fit(featurizedData)
    idfModel.transform(featurizedData)
  }


  /**
    * Fits a K-means model (k = 20, up to 200 iterations) on the "features"
    * column of the given DataFrame.
    *
    * The original discarded the fitted model, making the call useless; it is
    * now returned (callers that ignored the Unit result still compile).
    *
    * @param data DataFrame with a "features" vector column
    * @return the fitted K-means model
    */
  def kmeans(data: DataFrame): KMeansModel = {
    new KMeans()
      .setK(20)
      .setMaxIter(200)
      .fit(data)
  }

  /**
    * Fits an LDA topic model with 3 topics and at most 5 iterations on the
    * "features" column of the given DataFrame.
    *
    * @param data DataFrame with a "features" vector column
    * @return the fitted LDA model
    */
  def lda(data: DataFrame): LDAModel =
    new LDA().setK(3).setMaxIter(5).fit(data)


  /**
    * Decodes a Base64 string to text using the platform default charset
    * (preserved from the original; pass an explicit charset if cross-platform
    * determinism is ever required).
    *
    * The previous implementation used the JDK-internal `sun.misc.BASE64Decoder`,
    * which was never public API and is gone in JDK 9+. `java.util.Base64` is
    * the supported replacement; the MIME decoder is the closest match because,
    * like the old class, it tolerates line breaks in the input.
    *
    * @param value Base64-encoded input
    * @return the decoded text
    */
  def base64decode(value: String): String =
    new String(util.Base64.getMimeDecoder.decode(value))


}
