package com.etc

import org.apache.spark.SparkConf
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.regression.IsotonicRegression
import org.apache.spark.sql.SparkSession

/**
  * Isotonic regression example.
  *
  * Reads house data from "house.csv" (semicolon-separated, with header),
  * fits an [[org.apache.spark.ml.regression.IsotonicRegression]] model of
  * price as a function of square footage, and prints predictions for a
  * held-out test split.
  */
object lsotonic {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("linner")
    val spark = SparkSession.builder().config(conf).getOrCreate()

    try {
      // "sep" = ";" because house.csv is semicolon-delimited; the header row supplies column names.
      val file = spark.read.format("csv").option("sep", ";").option("header", "true").load("house.csv")

      import spark.implicits._

      // Random values used only to shuffle rows before splitting.
      // NOTE(review): unseeded, so runs are not reproducible — consider a fixed seed.
      val random = new util.Random()

      // Columns are loaded as strings (no inferSchema), so convert to Double explicitly;
      // an extra "random" column drives the shuffle via sort.
      val data = file.select("square", "price")
        .map(row => (row.getString(0).toDouble, row.getString(1).toDouble, random.nextDouble()))
        .toDF("square", "price", "random")
        .sort("random")

      // Pack the single feature column into the Vector column the estimator expects.
      val assembler = new VectorAssembler()
        .setInputCols(Array("square"))
        .setOutputCol("features")

      val frame = assembler.transform(data)

      // 80/20 train/test split.
      val Array(train, test) = frame.randomSplit(Array(0.8, 0.2))

      val lso = new IsotonicRegression().setLabelCol("price").setFeaturesCol("features")

      lso.fit(train).transform(test).show()
    } finally {
      // Always release the SparkSession — the original leaked it on every exit path.
      spark.stop()
    }
  }
}
