package spark.mllib


import java.util

import org.apache.spark.ml.feature.Word2Vec
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.RowFactory
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types._


/**
  * Created by liuwei on 2017/5/15.
  */
object Word2VecTest {

  /**
   * Spark ML Word2Vec demo: builds a tiny document DataFrame (each row a bag
   * of words from one sentence), fits a Word2Vec model mapping words to
   * 4-dimensional vectors, and prints the per-document result vectors.
   */
  def main(args: Array[String]): Unit = {

    // App name now matches this object (was copy-pasted "ChiSquareTest").
    val sparkConf = new SparkConf().setAppName("Word2VecTest").setMaster("local[8]")
    // Build the session directly from the conf; a separately constructed
    // SparkContext is unnecessary (SparkSession creates and owns one).
    val spark = SparkSession.builder.config(sparkConf).getOrCreate()

    try {
      // Input data: each row is a bag of words from a sentence or document.
      val documentDF = spark.createDataFrame(Seq(
        "Hi I heard about Spark".split(" "),
        "I wish Java could use case classes I ".split(" "),
        "Logistic regression models are neat".split(" ")
      ).map(Tuple1.apply)).toDF("text")

      println(s"documentDF.schema: ${documentDF.schema}")
      documentDF.show(false)

      // Learn a mapping from words to vectors. minCount = 1 keeps every word
      // in this tiny corpus; vectorSize = 4 keeps the output readable.
      val word2Vec = new Word2Vec()
        .setInputCol("text")
        .setOutputCol("result")
        .setVectorSize(4)
        .setMinCount(1)
      val model = word2Vec.fit(documentDF)

      // Transform appends the averaged word-vector column "result".
      val result = model.transform(documentDF)
      println(result.schema)
      println(result.columns.length)
      result.show()
    } finally {
      // Release local Spark resources even if fitting/transforming fails.
      spark.stop()
    }
  }

}
