package com.qing

import java.util

import com.huaban.analysis.jieba.JiebaSegmenter
import com.mongodb.spark.MongoSpark
import com.mongodb.spark.rdd.MongoRDD
import com.qing.utils.MongodbUtils
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.ml.clustering.{KMeans, LDA}
import org.apache.spark.ml.feature.{CountVectorizer, NGram, Word2Vec}
import org.apache.spark.ml.linalg.{DenseVector, SparseVector, Vectors}
import org.apache.spark.ml.param.{ParamPair, Params}
import org.apache.spark.ml.util.{BaseReadWrite, DefaultParamsWritable, MLWriter}
import org.apache.spark.mllib.feature.{HashingTF, IDF}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{ArrayType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.bson.Document
import org.codehaus.jettison.json.{JSONArray, JSONObject}
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.JavaConversions._
import scala.io.Source
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

/**
  * Created by Administrator on 2017/12/22 0022.
  */
object Main {

  /**
   * Entry point.  The single CLI argument is a base64-encoded JSON request
   * whose "method" field selects the job ("import" or "algorithm").
   *
   * Results and errors are reported as JSON printed to stdout.
   */
  def main(args: Array[String]): Unit = {

    // Silence Spark's verbose logging so only our JSON output reaches stdout.
    Logger.getLogger("org").setLevel(Level.ERROR)

    if (args.nonEmpty) {
      val obj = new JSONObject(base64decode(args(0)))
      val method = obj.optString("method")

      method match {
        case "import" =>
          // Data import: fetch a 5-document sample and echo it back as JSON.
          obj.optString("scheme") match {
            case "mongodb" =>
              val result = getRddFromMongodb(obj).take(5).toBuffer

              val json = new JSONObject()
              val arr = new JSONArray()
              for (t <- result) {
                arr.put(new JSONObject(t))
              }
              json.put("data", arr)
              json.put("message", "success")
              json.put("code", 1)
              println(json.toString)

            case other =>
              // Previously an unknown scheme crashed with a MatchError;
              // report it as a structured error instead.
              println(errorJson(s"unsupported scheme: $other"))
          }

        case "algorithm" =>
          obj.optString("operation") match {
            case "lda"    => lda(obj)
            case "kmeans" => kmeans(obj)
            case other    => println(errorJson(s"unsupported operation: $other"))
          }

        case other =>
          println(errorJson(s"unsupported method: $other"))
      }
    }
  }

  /** Builds the error payload printed for unsupported requests. */
  private def errorJson(message: String): String = {
    val json = new JSONObject()
    json.put("code", 0)
    json.put("message", message)
    json.toString
  }

  // Shared Spark handles, initialized as a side effect of getRddFromMongodb
  // and reused by the transformation helpers below.  They remain null until
  // an import/algorithm job actually runs.
  var sc: SparkContext = null
  var sqlContext: SQLContext = null

  /**
   * Connects to MongoDB (URI from the request's "path" field, with a default),
   * loads the collection as JSON strings and, when a "filter" field is given,
   * projects each record down to that comma-separated field list.
   *
   * Side effect: initializes the shared `sc` / `sqlContext` handles.
   */
  private def getRddFromMongodb(obj: JSONObject): RDD[String] = {
    val path = obj.optString("path", "mongodb://175.102.18.112:27018/OpenCollege_kw.art_syn")
    val sparkConf = new SparkConf()
      .set("spark.mongodb.input.uri", path)
    // NOTE(review): master is hard-coded to "local" — consider making it configurable.
    sc = new SparkContext("local", "simple-ml", sparkConf)

    sqlContext = new SQLContext(sc)

    val rdd = MongoSpark.load(sc)
    var result = doc2Json(rdd)

    // BUG FIX: the old `filter.length > 1` test silently skipped the projection
    // when exactly ONE field was requested ("title".split(",") has length 1).
    // Apply the projection whenever a non-empty filter string was supplied.
    val filterStr = obj.optString("filter")
    if (filterStr.nonEmpty) {
      result = filterParams(filterStr.split(","), result)
    }
    result
  }

  /** Projects each JSON record down to the requested set of keys. */
  def filterParams(params: Array[String], rdd: RDD[String]): RDD[String] = {
    rdd.map { record =>
      val source = new JSONObject(record)
      val projected = new JSONObject()
      params.foreach(key => projected.put(key, source.get(key)))
      projected.toString
    }
  }

  /**
   * Tokenizes every JSON record with jieba and strips stop words.
   *
   * Every value whose key does not look like an id is segmented and all the
   * tokens of a record are concatenated into one sequence.  The result is
   * cached because it is consumed more than once downstream (e.g. by tfidf).
   */
  def splitWords(rdd: RDD[String]): RDD[Seq[String]] = {
    // BUG FIX: the stop-word file used to be re-opened and fully re-read for
    // EVERY record inside the map closure, and the Source was never closed.
    // Read it once on the driver; the (serializable) ArrayList travels with
    // the closure to the executors.
    // NOTE(review): relative path — assumes the job runs from the project root.
    val stopWords = new util.ArrayList[String]()
    stopWords.add(" ")
    stopWords.add("  ")
    val file = Source.fromFile("./src/main/resources/stop_word.txt")
    try {
      for (line <- file.getLines()) {
        stopWords.add(line)
      }
    } finally {
      file.close()
    }

    rdd.map(s => {
      val obj = new JSONObject(s)

      val keys = obj.keys()
      val lists = new util.ArrayList[String]()
      // A fresh segmenter per record avoids having to serialize JiebaSegmenter.
      val jieba = new JiebaSegmenter()

      while (keys.hasNext) {
        val key = keys.next().asInstanceOf[String]
        // Identifier fields carry no textual signal; skip them.
        if (!key.toLowerCase.contains("id")) {
          lists.addAll(jieba.sentenceProcess(obj.getString(key)))
        }
      }
      lists.removeAll(stopWords)
      lists.toSeq
    }).cache()

  }


  /** Converts each BSON Document into its JSON string representation. */
  def doc2Json(rdd: MongoRDD[Document]): RDD[String] = {
    rdd.map { doc =>
      val json = new JSONObject()
      val entries = doc.entrySet().iterator()
      while (entries.hasNext) {
        val entry = entries.next()
        json.put(entry.getKey, entry.getValue)
      }
      json.toString
    }
  }


  /**
   * Builds a TF-IDF feature DataFrame (single sparse-vector "features" column)
   * from the MongoDB collection described by `obj`.
   */
  def tfidf(obj: JSONObject): DataFrame = {
    val documents = splitWords(getRddFromMongodb(obj))
    val hashingTF = new HashingTF()

    val tf = hashingTF.transform(documents)

    // FIX: removed the dead `mapWords`/broadcast pair — the broadcast variable
    // was never read anywhere, yet building it forced a full collect() of every
    // token of the corpus onto the driver.

    // minDocFreq = 4: terms appearing in fewer than 4 documents are ignored.
    val idf = new IDF(4).fit(tf)

    val tf_idf = idf.transform(tf).map(s => {

      val arr = s.toArray
      val len = arr.length

      // Keep only non-zero weights and rebuild the vector as an ml.linalg
      // sparse vector (IDF produces mllib vectors; the ml-package algorithms
      // downstream need ml vectors).
      val map = new util.HashMap[Int, Double]()
      for (i <- 0 until len) {
        if (arr.apply(i) != 0) {
          map.put(i, arr.apply(i))
        }
      }

      // The id column is a constant placeholder; only "features" is selected.
      (0, Vectors.sparse(len, map.toSeq))
    })


    sqlContext.createDataFrame(tf_idf).toDF("id", "features").select("features")
  }

  /** Trains Word2Vec on the tokenized collection and returns its "features" column. */
  def word2vector(obj: JSONObject): DataFrame = {
    val documents = rdd2df(splitWords(getRddFromMongodb(obj)))

    val estimator = new Word2Vec()
      .setInputCol("text")
      .setOutputCol("features")
      .setMinCount(4)
      .setVectorSize(10)
      .setMaxIter(10)

    val model = estimator.fit(documents)
    model.transform(documents).select("features")
  }

  /** Wraps an RDD of token sequences into a single-column ("text") DataFrame. */
  def rdd2df(rdd: RDD[Seq[String]]): DataFrame = {
    val rows = rdd.map(tokens => Row(tokens.toBuffer))
    val schema = StructType(
      StructField("text", ArrayType(StringType, true), true) :: Nil
    )
    sqlContext.createDataFrame(rows, schema).toDF("text")
  }


  /**
   * Dispatches to the feature transformation named by the request's
   * "transform" field ("tfidf" or "word2vector").
   *
   * @throws IllegalArgumentException for an unknown transform name
   *         (previously this surfaced as an opaque MatchError)
   */
  def transform(obj: JSONObject): DataFrame = {
    obj.optString("transform") match {
      case "tfidf"       => tfidf(obj)
      case "word2vector" => word2vector(obj)
      case other =>
        throw new IllegalArgumentException(
          s"unknown transform: '$other' (expected tfidf or word2vector)")
    }
  }


  /**
   * Runs LDA topic modelling on the transformed collection.
   *
   * LDA needs count-based (non-negative) features, so a request asking for
   * the word2vector transform is rejected up front.
   */
  def lda(obj: JSONObject) = {

    if (obj.optString("transform").equals("word2vector")) {
      // word2vector yields dense embeddings with possibly negative values,
      // which LDA cannot consume; steer the caller towards kmeans instead.
      val ret = new JSONObject()
      ret.put("code", 1)
      ret.put("message", "lda算法不能与word2vector混合使用，建议使用kmeans")
      println(ret.toString)
    } else {

      val trans = transform(obj)

      // Hard-coded parameters for now; the commented line shows the intended
      // source (the request's "params" object).
      val params = new JSONObject()
      params.put("k", "3")
      params.put("iter", "2")
      //    val params = obj.optJSONObject("params")
      val lda = new LDA()
        .setK(params.optInt("k", 3))
        .setMaxIter(params.optInt("iter", 2))
      val model = lda.fit(trans).transform(trans)

      // Debug output: list the estimator's public fields.
      for (field <- lda.getClass.getFields) {
        println(field + "...")
      }

      //      lda.save("model")
      val cls = Class.forName("org.apache.spark.ml.clustering.LDA")
      // NOTE(review): `getModel(lda)` is not defined anywhere in this file —
      // presumably the missing serializer counterpart of saveModel.  Confirm.
      val getModel = loadModel(cls, getModel(lda))

      // BUG FIX: the old match used the companion objects (`case LDA => ...`),
      // which compare the instance for EQUALITY with the singleton and can
      // therefore never match — every call ended in a MatchError.  Match on
      // the runtime type instead.
      val retModel = getModel match {
        case m: LDA    => m
        case m: KMeans => m
      }
      println(retModel.getClass.getName)
    }
  }


  /** Runs KMeans clustering on the transformed collection and shows 3 result rows. */
  def kmeans(obj: JSONObject) = {
    val trans = transform(obj)

    // Hard-coded parameters, mirroring lda(); the defaults passed to optInt
    // only apply when the keys are absent (they never are here).
    val params = new JSONObject()
    params.put("k", "10")
    params.put("iter", "20")

    val model = new KMeans()
      .setK(params.optInt("k", 10))
      .setMaxIter(params.optInt("iter", 200))
      .setSeed(0)
      .fit(trans)

    model.transform(trans).show(3)
  }

  /**
   * Reconstructs an ML estimator from its JSON description (the format written
   * by saveModel): instantiates the class named in "class" with the saved uid,
   * then replays every paramMap entry through the matching setXxx setter.
   *
   * NOTE(review): setters are invoked with the raw JSON value; a type mismatch
   * (e.g. Integer where Double is expected) will fail at invoke time.
   */
  def loadModel[T](clazz: Class[T], modelStr: String): T = {
    val obj = new JSONObject(modelStr)
    val paramMap = obj.getJSONObject("paramMap")
    val cls = Class.forName(obj.optString("class"))

    // Every Spark ML estimator exposes a (uid: String) constructor.
    val model = cls.getConstructor(classOf[String])
      .newInstance(paramMap.optString("uid"))
      .asInstanceOf[T]

    // Hoisted out of the loop: the old code re-materialized the key list for
    // every single method of the class.
    val keys = paramMap.keys().toList

    for (method <- cls.getMethods) {
      val name = method.getName
      if (name.startsWith("set") && name.length > 3) {
        // setFoo -> foo
        val key = name.substring(3, 4).toLowerCase + name.substring(4)
        if (keys.contains(key)) {
          method.invoke(model, paramMap.get(key))
        }
      }
    }
    model
  }


  /**
   * Serializes an estimator's class, uid and full param map to JSON and
   * persists it — together with the originating request — in the model
   * collection.
   *
   * @param obj   the original request object (stored under "request")
   * @param write the estimator whose parameters are being saved
   */
  def saveModel(obj: JSONObject, write: Params): Unit = {

    val uid = write.uid
    val cls = write.getClass.getName
    val params = write.extractParamMap().toSeq.asInstanceOf[Seq[ParamPair[Any]]]

    val paramMap = new JSONObject()

    // Each param value is json-encoded by the param itself, then unwrapped
    // back to a plain value for the jettison map.
    val jsonParams = params.map {
      case ParamPair(p, v) => p.name -> parse(p.jsonEncode(v))
    }.toList

    jsonParams.foreach(s => {
      paramMap.put(s._1, s._2.values)
    })

    // BUG FIX: this local used to be named `obj`, shadowing the request
    // parameter — so "request" below stored a copy of the model JSON and the
    // actual request was lost.
    val modelJson = new JSONObject()
    modelJson.put("class", cls)
    modelJson.put("timestamp", System.currentTimeMillis())
    modelJson.put("uid", uid)
    modelJson.put("paramMap", paramMap)

    val collection = MongodbUtils.getModelCollection
    val document = new Document()
    // BUG FIX: was obj.optString("") — an empty key that always yielded "".
    // Presumably the model name comes from the request; confirm the field name.
    document.put("name", obj.optString("name"))
    document.put("class", cls)
    document.put("model", modelJson.toString)
    document.put("request", obj.toString)
    collection.insertOne(document)
  }


  /**
   * Decodes a base64 string to text (platform default charset, as before).
   *
   * FIX: replaced sun.misc.BASE64Decoder — an internal JDK API that was
   * removed in modern JDKs — with the public java.util.Base64.  The MIME
   * decoder matches the old decodeBuffer behaviour of tolerating line breaks.
   */
  def base64decode(value: String): String =
    new String(java.util.Base64.getMimeDecoder.decode(value))


}
