package spark.mllib.synonyms

import org.apache.spark.ml.feature.Word2Vec
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.functions.{col, struct, udf}

import scala.collection.mutable

/**
  * Created by liuwei on 2017/9/8.
  */
/**
  * Demo: run Word2Vec over a tiny corpus, pack several columns into a struct,
  * stringify each field of that struct with a UDF, then pull one field back
  * out by position.
  */
object RowTest {
  /**
    * Converts every field of a [[Row]] to its string form, mapping SQL NULLs
    * to the empty string. Used below on a `struct(...)` column so that
    * heterogeneous columns (array, vector, double) become a uniform
    * array<string> column.
    */
  val originDataUdf = udf((row: Row) =>
    (0 until row.size).map(i => if (row.isNullAt(i)) "" else row.get(i).toString))

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("ChiSquareTest").setMaster("local[8]")
    // Build the session straight from the conf. The original created a bare
    // SparkContext whose reference was discarded and then relied on
    // getOrCreate() picking it up implicitly.
    val spark = SparkSession.builder.config(sparkConf).getOrCreate()

    try {
      val documentDF = spark.createDataFrame(Seq(
        "Hi I heard about Spark".split(" "),
        "I wish Java could use case classes".split(" "),
        "Logistic regression models are neat test".split(" ")
      ).map(Tuple1.apply)).toDF("text")

      // Tiny vector size / minCount=1 because this is a toy corpus.
      val word2Vec = new Word2Vec()
        .setInputCol("text")
        .setOutputCol("result")
        .setVectorSize(4)
        .setMinCount(1)
      val model = word2Vec.fit(documentDF)

      // Array columns should be received as Seq[String] (not
      // mutable.WrappedArray), which is stable across Scala/Spark versions.
      val wordSizeUdf = udf { (words: Seq[String]) => words.length.toDouble }
      val leftDF = model.transform(documentDF)
        .withColumn("wordSize", wordSizeUdf(col("text")))

      // Column names folded into the struct; their order defines the
      // positions inside the resulting array<string> column.
      val oriTitleNames = List("text", "result", "wordSize")
      val df = leftDF.withColumn(
        "originData",
        originDataUdf(struct(oriTitleNames.head, oriTitleNames.tail: _*)))

      // Derive the position from the column list instead of the magic
      // constant 2, and guard against short arrays.
      val idIndex = oriTitleNames.indexOf("wordSize")
      val selectIdUdf = udf { (arr: Seq[String]) =>
        if (idIndex >= 0 && idIndex < arr.length) arr(idIndex) else ""
      }
      df.withColumn("id", selectIdUdf(col("originData")))
        .drop("originData")
        .show(false)
    } finally {
      // Release the local Spark context; the original never stopped it.
      spark.stop()
    }
  }

}
