package spark.mllib

import java.util.ArrayList

import org.apache.spark.ml.linalg.Vector
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.{col, udf}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
import org.slf4j.LoggerFactory

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions

/**
  * Created by liuwei on 2017/7/24.
  */
object ColumnRowTransfromTest3 {

  private val logPrefix = " ===== ColumnTransformRowJob ===== "
//  val log = LoggerFactory.getLogger(classOf[ColumnRowTransfromTest3])

  /**
    * Demo job: pivots selected ("analysis") columns of a DataFrame from
    * column-oriented to row-oriented form.
    *
    * For every analysis column holding an ML [[Vector]], each input row is
    * exploded into one output row per vector element, carrying the remaining
    * plain columns through unchanged plus two new string columns:
    * `feature_name` (element index) and `feature_value` (element value).
    * A "TEXT" path (one output row per input row, raw string value) is kept
    * for reference but is unreachable with the hard-coded `fromType`.
    */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("LDATest").setMaster("local[8]")
    val sc = new SparkContext(sparkConf)
    val spark = SparkSession.builder.getOrCreate()

    // Loads data.
    val dataset: DataFrame = spark.read.format("libsvm")
      .load("data/mllib/sample_lda_libsvm_data.txt")
    dataset.show()

    // titleNames mirrors the input schema's column order (label, features).
    val titleNames = new ArrayList[String]()
    titleNames.add("label")
    titleNames.add("features")
    val analysisColumns = Seq("features")

    // Row indices of the plain (non-analysis) columns, copied through as-is.
    val plainColumnIndices = ArrayBuffer[Int]()
    for (k <- 0 until titleNames.size()) {
      if (!analysisColumns.contains(titleNames.get(k))) {
        plainColumnIndices += k
      }
    }

    // BUGFIX: the original reassigned `df = inputDf.drop(columnName)` inside a
    // foreach, so only the LAST analysis column was actually dropped. Drop all
    // of them in one call so schemaNew is correct for any number of columns.
    val df: DataFrame = dataset.drop(analysisColumns: _*)

    val feature_name = DataTypes.createStructField("feature_name", DataTypes.StringType, true)
    val feature_value = DataTypes.createStructField("feature_value", DataTypes.StringType, true)
    val schemaNew = df.schema.add(feature_name).add(feature_value)

    var outdf: DataFrame = null
    println(logPrefix + " receive parameter values analysisColumns : " + analysisColumns
       )
    println(logPrefix + " schemaNew : " + schemaNew  )

    val inputDf = dataset
    analysisColumns.foreach { columnName =>
      val columnIndex = titleNames.indexOf(columnName)
      if (-1 == columnIndex) {
        // Unknown column name: nothing to do.
      } else {
        val fromType = "Vector" // hard-coded for this test run
        println(logPrefix + " fromType : " + fromType  )
        if ("TEXT".equals(fromType)) {
          // One output row per input row: plain columns, then the analysis
          // column's name and its raw string value.
          val res = inputDf.rdd.map { row =>
            val cells = new ArrayBuffer[Any]
            // BUGFIX: look up the stored column index plainColumnIndices(j);
            // the original used the loop counter j directly, which only
            // worked because the plain columns happened to come first.
            for (j <- 0 until plainColumnIndices.size) {
              cells += row.get(plainColumnIndices(j))
            }
            cells += columnName
            cells += row.getString(columnIndex)
            Row.fromSeq(cells)
          }
          val resultDF2 = spark.createDataFrame(res, schemaNew)
          outdf = if (null == outdf) resultDF2 else outdf.union(resultDF2)
        } else {
          // Probe the first row to learn the vector length so every row can
          // be exploded into `size` output rows.
          var size = 0
          val obj: Any = inputDf.select(columnName).take(1).apply(0).get(0)
          obj match {
            case v: Vector => size = v.size
            case _         => // non-Vector column: size stays 0, no rows emitted
          }
          val res = inputDf.rdd.mapPartitions { rowIter =>
            val rows = new ArrayBuffer[Row]
            while (rowIter.hasNext) {
              val row = rowIter.next()
              for (i <- 0 until size) {
                val cells = new ArrayBuffer[Any]
                // Plain columns looked up by their stored index (see BUGFIX
                // note in the TEXT branch above).
                for (j <- 0 until plainColumnIndices.size) {
                  cells += row.get(plainColumnIndices(j))
                }
                row.get(columnIndex) match {
                  case vec: Vector =>
                    cells += i.toString            // feature_name = element index
                    cells += vec.apply(i).toString // feature_value = element value
                    rows.append(Row.fromSeq(cells))
                  case _ => // skip rows whose analysis cell is not a Vector
                }
              }
            }
            rows.iterator
          }

          println(logPrefix + " res : " + res.count()   )
          val resultDF2 = spark.createDataFrame(res, schemaNew)
          outdf = if (null == outdf) resultDF2 else outdf.union(resultDF2)
        }
      }
    }

    println(logPrefix + " outdf.schema : " + outdf.schema  )
    println(logPrefix + " outdf.count : " + outdf.count  )
    outdf.show()
  }
}
