package cn.lgwen.spark.ml.learning

import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{Encoders, Row, RowFactory, SparkSession}

object Titanci {
  /**
   * Reads the Titanic training CSV and builds a DataFrame of (Age, features)
   * where `features` is a dense vector of (Fare, Parch, SibSp, Pclass).
   *
   * @param args optional: args(0) overrides the input CSV path
   *             (defaults to the original hard-coded location).
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder.master("local[*]")
      .appName("PCAExample")
      .getOrCreate()
    import spark.implicits._

    // Generalized: input path is configurable, defaulting to the original value.
    val inputPath = if (args.nonEmpty) args(0) else "G:\\tmp\\train.csv"

    // Schema follows the Kaggle Titanic training-set column order.
    val source = spark.read.schema("PassengerId INT, Survived INT, Pclass DOUBLE, name STRING, " +
      "sex STRING, age INT, SibSp INT, Parch INT, Ticket STRING, Fare DOUBLE, Cabin STRING, Embarked STRING").csv(inputPath)

    //"PassengerId", "Survived","Pclass","Name","Sex","Age","SibSp","Parch","Ticket","Fare","Cabin","Embarked"
    source.createOrReplaceTempView("data_source")

    val featureCols = Seq("Age", "Fare", "Parch", "SibSp", "Pclass")

    // Fixes vs. the original:
    //  * Parch/SibSp are declared INT in the schema, so Row.getDouble on them threw
    //    ClassCastException — read with getInt and widen to Double.
    //  * Encoders.bean(classOf[Row]) is not a valid encoder (Row is not a Java bean);
    //    map to a (Int, Vector) tuple and rely on the implicit product encoder
    //    (org.apache.spark.ml.linalg.Vector has a registered UDT).
    //  * na.drop removes rows with nulls in ANY selected column, not just Age,
    //    preventing NPEs from getDouble on null Fare/Pclass.
    val subDf = source
      .select(featureCols.head, featureCols.tail: _*)
      .na.drop(featureCols)
      .map { row =>
        (row.getInt(0), // Age (INT per schema)
          Vectors.dense(
            row.getDouble(1),       // Fare   (DOUBLE)
            row.getInt(2).toDouble, // Parch  (INT)
            row.getInt(3).toDouble, // SibSp  (INT)
            row.getDouble(4)))      // Pclass (DOUBLE)
      }
      .toDF("Age", "features")

    subDf.show()

    spark.stop() // release the local Spark context
  }
}
