package spark.mllib

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.feature.{Normalizer, PCA}
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.sql.functions.{col, udf}
import org.apache.spark.sql.types.{ArrayType, StringType, StructField, StructType}
import org.apache.spark.sql.{Column, DataFrame, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

/**
  * Created by liuwei on 2017/7/24.
  */
object LDATest {
  /** Entry point: trains an online-variational LDA model on the bundled
    * sample libsvm dataset, prints corpus log-likelihood / perplexity, and
    * shows the top-weighted terms of each topic.
    *
    * The large commented-out sections below are exploratory code kept by the
    * original author (term-index -> word lookup, per-document topic
    * prediction, and a 2-D PCA projection of the results).
    */
  def main(args: Array[String]): Unit = {
    import org.apache.spark.ml.clustering.LDA
    import org.apache.spark.ml.linalg.Vector
    import org.apache.spark.ml.linalg.Vectors

    // local[8]: run Spark in-process with 8 worker threads.
    val sparkConf = new SparkConf().setAppName("LDATest").setMaster("local[8]")
    val sc = new SparkContext(sparkConf)
    val spark = SparkSession.builder.getOrCreate()

    // Loads data.
    val dataset:DataFrame = spark.read.format("libsvm")
      .load("data/mllib/sample_lda_libsvm_data.txt")


    dataset.show(false)

    // Trains a LDA model.
    val lda = new LDA()
      .setK(10)// k: number of topics (cluster centers), must be > 1
      .setMaxIter(10)// maxIter: maximum number of iterations, >= 0
//      .setCheckpointInterval(1) // checkpoint interval while iterating: >= 1, or -1 to disable checkpointing
      .setDocConcentration(0.1) // "alpha": Dirichlet prior on per-document topic distributions. NOTE(review): the original comment claimed "must be > 1.0" — that bound applies only to the "em" optimizer; "online" (used here) accepts values >= 0
      .setTopicConcentration(0.1)// "beta": Dirichlet prior on per-topic term distributions; same optimizer-dependent bound as docConcentration
      .setOptimizer("online")   // optimizer: "online" is the default; "em" and "online" are currently supported
    val model = lda.fit(dataset.select("features"))


    val ll = model.logLikelihood(dataset)
    val lp = model.logPerplexity(dataset)
    println(s"The lower bound on the log likelihood of the entire corpus: $ll")
    println(s"The upper bound on perplexity: $lp")

    // Scratch map for a term-index -> word lookup; only populated by the
    // commented-out experiments below, so it stays empty at runtime.
    val hm2 = new mutable.HashMap[Int,String]
//   val a =  sc.textFile("data/mllib/C0_segfeatures.txt").map( x => x.split(",")).map( x =>
//      hm2.put(x(0).replaceAll("\"","").toInt,x(1).replaceAll("\"",""))
////      hm2.put()
//    )
//    println(a.count())
//    hm2.put("ok","ok")

//    var data  = sc.textFile("data/mllib/C0_segfeatures.txt").map( x => x.split(",")).collect()
//    data.foreach{pair => hm2.put(pair(0).replaceAll("\"","").toInt,pair(1).replaceAll("\"",""))}
//    println(hm2+"============")

//    val rdd = sc.textFile("data/mllib/C0_segfeatures.txt").map( x => x.split(",")).map( x =>
//      Row(x(0).replaceAll("\"",""),x(1).replaceAll("\"",""))
//    )
//    var data = rdd.collect()
//    data.foreach{pair => hm2.put(pair._1,pair._2)}

//    val schema = StructType(
//      Seq(
//        StructField("index",StringType,true)
//        ,StructField("word",StringType,true)
//      )
//    )
//    val wordDataset = spark.createDataFrame(rdd,schema)

    // NOTE(review): `hm` is never used below — apparently leftover test data.
    val hm = mutable.HashMap(1 -> "b", 2 -> "c",3-> "d", 6 -> "a",9-> "e", 10 -> "f")

//    model.l
    // UDF intended to map the "termIndices" column of describeTopics to words;
    // currently an identity mapping and unused (its call sites are commented out).
    val resultUDF = udf((termIndices: mutable.WrappedArray[Integer]) => {// transform the termIndices column
      termIndices.map(index=>
//        hm2.get(index)
        index
      )
    })

    // Describe topics.
    val topics = model.describeTopics(10)//.withColumn("termIndices", resultUDF(col("termIndices")))



    println(topics.schema)
//      .withColumn("termIndices", resultUDF(col("termIndices"))).withColumn("termWeights", resultUDF(col("termWeights")))
    println("The topics described by their top-weighted terms:")


//    topics.join(topics, wordDataset("index") === topics("termIndices")).show()
    topics.show(false)
   // UDF that picks the index of the most probable topic from a
   // topicDistribution vector; unused (its call site below is commented out).
   val cosUDF = udf {
      (vector: Vector) =>
        vector.argmax
    }



    // Shows the result.
//    var transformed = model.transform(dataset)
//    transformed = transformed.withColumn("prediction",cosUDF(col("topicDistribution")))
//    println(transformed.schema)
//    transformed.show(false)
//    println(" transform start. ")



//    val mat = model.topicsMatrix
//    println(mat)
    /* val resultWithDistAndPCA = transformed.select("features").first().get(0).asInstanceOf[org.apache.spark.ml.linalg.Vector].size match {
      case 1 => {
        val zeroVectorUdf = udf {
          (v: Vector) => Vectors.zeros(0).toSparse
        }
        transformed.withColumn("2dimFeatures", zeroVectorUdf(col("features")))
      }
      case o => {
        val pca = new PCA()
        pca.setInputCol("features").setOutputCol("2dimFeatures").setK(2)
        val pcaModel = pca.fit(transformed)
        pcaModel.transform(transformed)
      }
    }
    println("resultWithDistAndPCA")
    resultWithDistAndPCA.show(false)


    val totalLines = resultWithDistAndPCA.count
    val exampleKindPer = new scala.collection.mutable.HashMap[Int, Int]
    val groupVal = resultWithDistAndPCA.groupBy("prediction").count().rdd.collect()
    for (kindNum <- groupVal) {
      val kind: Int = kindNum.getInt(0)
      //        val key = (kind + 1).toString
      val value = kindNum.getLong(1)
      exampleKindPer.put(kind, (value * 100 / totalLines).toInt + 1)
    }
//    val result = Clustering.LDARes.newBuilder()
    exampleKindPer.foreach(each => {
      val kind = each._1
      val limitNum = each._2
      resultWithDistAndPCA.filter("prediction = " + kind).limit(limitNum).collect().foreach(row => {
        val resSeq: org.apache.spark.ml.linalg.Vector = row.get(4).asInstanceOf[org.apache.spark.ml.linalg.Vector]
        resSeq.size match {
          case 2 => {
            println("x:"+resSeq.apply(0) +"y:"+resSeq.apply(1)+"z:"+(kind+1))
//            val pcaResult = Clustering.PcaResult.newBuilder
//              .setX(resSeq.apply(0))
//              .setY(resSeq.apply(1))
//              .setZ(kind + 1).build
//            result.addPcaResults(pcaResult)
          }
          case _ =>
        }
      })
    })

*/

  }


  /** Returns `s` unchanged when the ENTIRE string matches the regular
    * expression `patt`, otherwise `null` (legacy sentinel kept for
    * existing callers; a greenfield API would return `Option[String]`).
    */
  def isByRegex(s : String,patt :String) = {
    // Anchored whole-string test — identical semantics to matching `s`
    // against the `patt.r` extractor pattern `pattern(_*)`.
    if (patt.r.pattern.matcher(s).matches()) s else null
  }

}
