package spark.mllib

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.{col, udf}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions

/**
  * Created by liuwei on 2017/7/24.
  */
/**
  * Demo: train an LDA model on a libsvm corpus, replace each topic's term
  * indices with term strings loaded from a dictionary file, then explode each
  * topic row into (topic, "term:weight") rows and show them as a DataFrame.
  *
  * Created by liuwei on 2017/7/24.
  */
object ColumnRowTransfromTest2 {

  def main(args: Array[String]): Unit = {
    import org.apache.spark.ml.clustering.LDA

    val sparkConf = new SparkConf().setAppName("LDATest").setMaster("local[8]")
    val sc = new SparkContext(sparkConf)
    val spark = SparkSession.builder.getOrCreate()

    try {
      // Load the corpus: libsvm format, one document per line.
      val dataset: DataFrame = spark.read.format("libsvm")
        .load("data/mllib/sample_lda_libsvm_data.txt")

      // Train an LDA model.
      val lda = new LDA()
        .setK(4)                     // number of topics / cluster centers (> 1)
        .setMaxIter(10)              // maximum iterations (>= 0)
        // .setCheckpointInterval(1) // checkpoint interval (>= 1), or -1 to disable
        .setDocConcentration(1.0)    // Dirichlet prior on document-topic distributions
        .setTopicConcentration(1.0)  // Dirichlet prior on topic-term distributions
        // .setOptimizer("online")   // "em" or "online" (default: "online")
      val model = lda.fit(dataset)

      val ll = model.logLikelihood(dataset)
      val lp = model.logPerplexity(dataset)
      println(s"The lower bound on the log likelihood of the entire corpus: $ll")
      println(s"The upper bound on perplexity: $lp")

      // Build the term-index -> term-string dictionary from a file of
      // quoted "index","term" pairs, one pair per line.
      val hm2 = new mutable.HashMap[Int, String]
      val data = sc.textFile("data/mllib/C0_segfeatures.txt").map(_.split(",")).collect()
      data.foreach { pair =>
        hm2.put(pair(0).replaceAll("\"", "").toInt, pair(1).replaceAll("\"", ""))
      }
      println(hm2 + "============")

      // Replace each term index with its term string. Unknown indices fall back
      // to the index itself so downstream string handling never sees null
      // (hm2.get(...) returned Option, which Spark rendered as null elements
      // for missing keys and later caused an NPE on .toString).
      val resultUDF = udf((termIndices: mutable.WrappedArray[Integer]) =>
        termIndices.map(index => hm2.getOrElse(index.intValue(), index.toString))
      )

      // Describe topics, with termIndices rewritten to term strings.
      val topics = model.describeTopics(4)
        .withColumn("termIndices", resultUDF(col("termIndices")))

      println(topics.schema)
      println("The topics described by their top-weighted terms:")
      topics.show(false)

      // Number of terms per topic row. Pattern match instead of an unchecked
      // isInstanceOf on a type-erased generic (WrappedArray[Int]).
      val firstTermCell: Any = topics.select(col("termIndices")).take(1).apply(0).get(0)
      println("====￥￥￥￥￥￥￥￥￥￥￥￥" + firstTermCell)
      val size = firstTermCell match {
        case arr: Seq[_] => arr.size
        case _           => 0
      }
      println("====￥￥￥￥￥￥￥￥￥￥￥￥" + size)

      // Explode each topic row into (topic, "term:weight") rows, skipping the
      // placeholder term "CONSIDERED".
      val res2 = topics.rdd.flatMap { row =>
        val rows = ArrayBuffer.empty[Row]
        for (i <- 0 until size) {
          val term = row.getSeq[Any](1)(i).toString
          if (!"CONSIDERED".equals(term)) {
            rows += Row(row.getInt(0), term + ":" + row.getSeq[Any](2)(i))
          }
        }
        rows.iterator
      }

      println("=========================")

      val schema2 = StructType(Seq(
        StructField("label", IntegerType, nullable = true),
        StructField("vec", StringType, nullable = true)
      ))
      val resultDF2 = spark.createDataFrame(res2, schema2)
      resultDF2.show(false)
    } finally {
      // Also stops the underlying SparkContext.
      spark.stop()
    }
  }
}
