package spark.mllib

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.sql.functions.{col, udf}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable

/**
  * Created by liuwei on 2017/7/24.
  */
object LDATest2 {
  /**
    * Small Spark driver: loads a headerless CSV whose second column (`_c1`)
    * holds MLlib string-encoded vectors, parses that column into an
    * `ml.linalg.Vector` column, and prints the schemas before and after.
    *
    * Side effects only (reads a local file, prints to stdout); nothing is
    * returned.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("LDATest").setMaster("local[8]")
    // Build the session from the conf directly. SparkSession owns its
    // SparkContext, so creating a separate `new SparkContext(...)` (as the
    // original did) is redundant and also meant the conf never reached the
    // session.
    val spark = SparkSession.builder.config(sparkConf).getOrCreate()

    try {
      // Parses the MLlib textual vector form (dense "[1.0,2.0]" or sparse
      // "(3,[0,2],[1.0,3.0])") and converts it to the ml-package vector type.
      val stringToVectorUdf = udf { (s: String) =>
        Vectors.parse(s).asML
      }

      // NOTE(review): hard-coded local Windows path — consider taking it from
      // `args` instead. Expected columns: _c0 = paper id, _c1 = features.
      val df = spark.read
        .format("com.databricks.spark.csv")
        .load("C:\\Users\\lenovo\\Desktop\\abc.csv")
      df.show()
      println(df.schema)

      // Replace the raw string column in place with its parsed vector form.
      val df2 = df.withColumn("_c1", stringToVectorUdf(col("_c1")))
      println(df2.schema)
    } finally {
      // Always release Spark resources, even if the load/parse fails —
      // the original leaked the session and context on every run.
      spark.stop()
    }
  }
}
