package spark.mllib

import org.apache.spark.sql.functions.{col, udf}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable
import scala.collection.JavaConverters._
import java.io.CharArrayWriter
import java.util

import breeze.linalg.Axis._1

import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.runtime.universe.TypeTag
import scala.util.control.NonFatal
import org.apache.commons.lang3.StringUtils
import org.apache.spark.annotation.{DeveloperApi, Experimental, InterfaceStability}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.api.java.function._
import org.apache.spark.api.python.{PythonRDD, SerDeUtil}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.encoders._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.json.JacksonGenerator
import org.apache.spark.sql.catalyst.optimizer.CombineUnions
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.physical.{Partitioning, PartitioningCollection}
import org.apache.spark.sql.catalyst.util.usePrettyExpression
import org.apache.spark.sql.execution.{FileRelation, LogicalRDD, QueryExecution, SQLExecution}
import org.apache.spark.sql.execution.command.{CreateViewCommand, ExplainCommand, GlobalTempView, LocalTempView}
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.python.EvaluatePython
import org.apache.spark.sql.streaming.DataStreamWriter
import org.apache.spark.sql.types._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.unsafe.types.CalendarInterval
import org.apache.spark.util.Utils
import spark.{LDAT, LDATI}

import scala.collection.mutable.ArrayBuffer
import scala.util.Try

/**
  * Experimental Spark driver: fits an LDA topic model on libsvm-formatted data,
  * maps topic term indices back to term strings, and pivots the per-topic
  * term/weight arrays between column form and row form (one row per
  * (topic, term, weight) triple), printing intermediate results to stdout.
  *
  * NOTE(review): this is scratch/demo code — results are observed via println
  * and DataFrame.show rather than returned.
  *
  * Created by liuwei on 2017/7/24.
  */
object ColumnRowTransfromTest {

  /**
    * Entry point. Runs the whole pipeline end-to-end on a local[8] master:
    *  1. load data/mllib/sample_lda_libsvm_data.txt and fit an LDA model (k=3);
    *  2. describe topics and replace term indices with term strings via a UDF;
    *  3. flatten the topics DataFrame into (label, key, vec) rows and group
    *     them driver-side into a java.util.HashMap of LDATI lists;
    *  4. transform the dataset, derive a "prediction" column from the argmax
    *     of topicDistribution, and expand topicDistribution into vecN columns.
    */
  def main(args: Array[String]): Unit = {
    import org.apache.spark.ml.clustering.LDA
    import org.apache.spark.ml.linalg.Vector

    val sparkConf = new SparkConf().setAppName("LDATest").setMaster("local[8]")
    val sc = new SparkContext(sparkConf)
    val spark = SparkSession.builder.getOrCreate()

    // Loads data.
    val dataset: DataFrame = spark.read.format("libsvm")
      .load("data/mllib/sample_lda_libsvm_data.txt")

    // Trains a LDA model.
    val lda = new LDA()
      .setK(3) // k: number of topics (cluster centers), must be > 1
      .setMaxIter(10) // maxIter: maximum number of iterations, >= 0
      //      .setCheckpointInterval(1) // set checkpoint interval (>= 1) or disable checkpoint (-1)
      .setDocConcentration(1.0) // hyperparameter (Dirichlet prior) of the document-topic distribution; must be > 1.0
      .setTopicConcentration(1.0)
    // hyperparameter (Dirichlet prior) of the topic-word distribution; must be > 1.0
    //      .setOptimizer("online")   // default "online"; currently supported optimizers: "em", "online"
    val model = lda.fit(dataset)

    val ll = model.logLikelihood(dataset)
    val lp = model.logPerplexity(dataset)
    println(s"The lower bound on the log likelihood of the entire corpus: $ll")
    println(s"The upper bound on perplexity: $lp")

    // term-index -> term-string lookup, populated from a comma-separated file
    // of the form "index","term" (quotes stripped below).
    val hm2 = new mutable.HashMap[Int, String]

    var data = sc.textFile("data/mllib/C0_segfeatures.txt").map(x => x.split(",")).collect()
    data.foreach { pair => hm2.put(pair(0).replaceAll("\"", "").toInt, pair(1).replaceAll("\"", "")) }
    println(hm2 + "============")


    // NOTE(review): hm appears unused below — likely leftover test data.
    val hm = mutable.HashMap(1 -> "b", 2 -> "c", 3 -> "d", 6 -> "a", 9 -> "e", 10 -> "f")

    //    model.l
    // UDF replacing each term index with its term string.
    // NOTE(review): hm2.get returns Option[String], so the resulting column is
    // an array of structs/options rather than plain strings — confirm intended.
    val resultUDF = udf((termIndices: mutable.WrappedArray[Integer]) => {
      // process the second column's output (termIndices -> term strings)
      termIndices.map(index =>
        hm2.get(index)
      )
    })

    // Describe topics.
    val topics = model.describeTopics(5).withColumn("termIndices", resultUDF(col("termIndices")))


    println(topics.schema)
    //      .withColumn("termIndices", resultUDF(col("termIndices"))).withColumn("termWeights", resultUDF(col("termWeights")))
    println("The topics described by their top-weighted terms:")


    //    topics.join(topics, wordDataset("index") === topics("termIndices")).show()
//    topics.show(false)

    // Probe the first row to learn how many terms per topic were returned.
    // NOTE(review): isInstanceOf[mutable.WrappedArray[Int]] is type-erased —
    // only the WrappedArray part is actually checked at runtime.
    var size = 0
    val obj: Any = topics.select(col("termIndices")).take(1).apply(0).get(0)
    println("====￥￥￥￥￥￥￥￥￥￥￥￥" + obj)
    //    println("====￥￥￥￥￥￥￥￥￥￥￥￥" + topics.select(col("termWeights")).take(1).apply(0).get(0))
    if (obj.isInstanceOf[mutable.WrappedArray[Int]]) {
      size = obj.asInstanceOf[mutable.WrappedArray[Int]].size
    }
    println("====￥￥￥￥￥￥￥￥￥￥￥￥" + size)


    // Column form: one row per topic, cells "term:weight" for each of the
    // `size` top terms (size is captured in the closure from the driver).
    val res: RDD[Row] = topics.rdd.map(row => {
      var res = new ArrayBuffer[Any]
      res += row.getInt(0)
      for (i <- 0 until size) {
        //          println(i + "!!!" + row.getSeq(1)(i).toString + ":" + row.getSeq(2)(i))
        res += row.getSeq(1)(i).toString + ":" + row.getSeq(2)(i)
      }
      Row.fromSeq(res)
    })


    // Row form: flatten each topic into `size` rows of (topic, term, weight).
   val res2 =  topics.rdd.flatMap(row => {
      val rows = new ArrayBuffer[Row]
      for (i <- 0 until size) {
        var res = new ArrayBuffer[Any]
        res += row.getInt(0)
        res += row.getSeq(1)(i)
        res += row.getSeq(2)(i)
//        var ldaTopics = new LDAT(row.getInt(0),row.getSeq(1)(i),row.getSeq(2)(i))
//        list22222.add(ldaTopics)
        rows.append(Row.fromSeq(res))
      }
     rows.iterator
    }
    )
    res2.count()



    println("=========================")
    // NOTE(review): foreach returns Unit, so `map` here is of type Unit —
    // this only prints each row on the executors/driver.
    val map = res.foreach(row => {
      println(row.mkString("="))
    })
//    val count = res.count()
    // Build a schema for the column-form RDD: label + vec0..vec(size-1).
    val seq = ArrayBuffer.empty[StructField]

    val label = DataTypes.createStructField("label", DataTypes.IntegerType, true)
    seq.append(label)
    for (i <- 0 until size) {
      val vec = DataTypes.createStructField("vec" + i, DataTypes.StringType, true)
      seq.append(vec)
    }

//    println("count==" + count)
    val schema = StructType(
      seq
    )

//    val resultDF = spark.createDataFrame(res, schema)
//    resultDF.show(false)

  // Schema for the row-form RDD: (topic label, term key, weight).
  val schema2 = StructType(
      Seq(
                StructField("label", IntegerType, true)
                , StructField("key", StringType, true)
        , StructField("vec", DoubleType, true)
    ))
    val resultDF2 = spark.createDataFrame(res2, schema2)//.sortWithinPartitions("vec")


    resultDF2.show

    println("rownum:5")
    println("columnnum:3")
//    var list = new util.ArrayList[LDATI]
   // Group (key, weight) pairs by topic label into Java lists, driver-side.
   val ldaMap = new util.HashMap[Int,util.List[LDATI]]

      resultDF2.collect.foreach(
     row => {
       val ladti = new LDATI(row.getString(1),row.getDouble(2))
       if(ldaMap.containsKey(row.getInt(0))){
         ldaMap.get(row.getInt(0)).add(ladti)
       }else{
         var list = new util.ArrayList[LDATI]
         list.add(ladti)
         ldaMap.put(row.getInt(0),list)
       }
//      println(row.getInt(0),row.getString(1),row.getDouble(2))
    })
    println(ldaMap)

   //    println(resultDF2.schema)

          // UDF: index of the largest topic probability = predicted topic.
          val cosUDF = udf {
             (vector: Vector) =>
               vector.argmax
           }
       var transformed = model.transform(dataset)
       transformed = transformed.withColumn("prediction",cosUDF(col("topicDistribution")))
        // count of documents per predicted topic
        val redf = transformed.select("prediction").rdd.map(row =>(row.getAs[Int](0),1)).reduceByKey(_+_)


       // Probe the first topicDistribution vector for its dimensionality (= k).
       var transformedSize = 0
       val obj2: Any = transformed.select(col("topicDistribution")).take(1).apply(0).get(0)
       //    println("====￥￥￥￥￥￥￥￥￥￥￥￥" + topics.select(col("termWeights")).take(1).apply(0).get(0))
       if (obj2.isInstanceOf[Vector]) {
         transformedSize = obj2.asInstanceOf[Vector].size
       }
       // Expand each topicDistribution vector into "i:value" string cells.
       val res3: RDD[Row] = transformed.select("topicDistribution").rdd.map(row => {
         var res = new ArrayBuffer[Any]
         for (i <- 0 until transformedSize) {
           //          println(i + "!!!" + row.getSeq(1)(i).toString + ":" + row.getSeq(2)(i))
           val vec = row.get(0)
           if (vec.isInstanceOf[Vector]) {
             res += i.toString + ":" +vec.asInstanceOf[Vector].apply(i)
           }

         }
         Row.fromSeq(res)
       })
       val seq3 = ArrayBuffer.empty[StructField]

   //    val label = DataTypes.createStructField("label", DataTypes.IntegerType, true)
   //    seq3.append(label)
       for (i <- 0 until transformedSize) {
         val vec = DataTypes.createStructField("vec" + i, DataTypes.StringType, true)
         seq3.append(vec)
       }
       val schema3 = StructType(
         seq3
       )
       val resultDF3 = spark.createDataFrame(res3, schema3)
       println("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
       resultDF3.show(false)
       transformed = transformed.drop("topicDistribution")
       // NOTE(review): RDD.zip requires identical partitioning/element counts
       // and pairs rows positionally — fragile if either side is reordered.
       val outputRDDWithIndex = transformed.rdd.zip(resultDF3.rdd)
       val RowRDD = outputRDDWithIndex.map(row => {
         var res = new ArrayBuffer[Any]
         res ++= row._1.toSeq
         res ++= row._2.toSeq
         Row.fromSeq(res)
       })
       // Extend the transformed schema with the vecN string columns.
       var schema4  = transformed.schema
       for (i <- 0 until transformedSize) {
         schema4 = schema4.add(StructField("vec"+i,  DataTypes.StringType))
       }
       println(transformedSize)
       println(schema4)
   //    val schema4 = transformed.schema.add(StructField(columnName, StringType)
       val df = spark.createDataFrame(RowRDD, schema4)
       df.show(false)

    // Print per-topic document counts computed above.
    redf.collect().foreach(row=>
      println(row._1+"====="+row._2)
//        println(row._2)
    )
    println()

  }
}


//   val cosUDF = udf {
//      (vector: Vector) =>
//        vector.argmax
//    }


// Shows the result.
//    var transformed = model.transform(dataset)

//    transformed = transformed.withColumn("prediction",cosUDF(col("topicDistribution")))
//    println(transformed.schema)
//    transformed.show(false)
//    println(" transform start. ")

//    val cols = Array("features","prediction")
//    val s = cols.foreach(col => "\""+col+"\""+",")

//    val s: String*  = cols
//    val splitUDF = udf {
//      (vector: Vector) =>
//        var k = ""
//        for(i <- 0 until vector.size){
//          k = k  + i.toString+":"+ vector.apply(i)
//        }
//        k
//    }

/*val obj =transformed.select(col("features")).take(1).apply(0).get(0)
if (obj.isInstanceOf[Vector]){
  size = obj.asInstanceOf[Vector].size
}
println("====￥￥￥￥￥￥￥￥￥￥￥￥"+size)
for(i <- 0 until size){
  val titleName = "vec_" + i
  val rdd:RDD[Row] = transformed.select(col("features")).rdd
  val resRdd:RDD[Row] = rdd.map(
    row =>{
//          println( "======"+row.size)
      println( "======"+row.getAs[org.apache.spark.ml.linalg.Vector](0).apply(i))
//          Row.merge(row,Row(row.getAs[org.apache.spark.ml.linalg.Vector](0).apply(i)))
      row
    }*/


//        row =>row.getAs[org.apache.spark.ml.linalg.Vector](0).apply(i)
/* resRdd.map(row =>{
   println("====%%%^^^^^"+row.size)
   for(k <- 0 to row.size){
     println("====%%%"+row.get(k))
   }
  }
 )*/
//      resRdd.count()
//      println("^^^^^"+resRdd)

//    println( "size===="+ size)

//    val df = transformed.withColumn("AAA",splitUDF(col("features")))
//    df.show(false)

//  }


//}
