package com.etc

import org.apache.spark.SparkConf
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.sql.SparkSession

object Linearregression {

  /**
    * Trains a linear regression model predicting house price from square
    * footage. Reads a semicolon-separated CSV ("house.csv" with a header row,
    * columns "square" and "price"), shuffles the rows, splits 80/20 into
    * train/test, fits an elastic-net regularized linear model, and prints the
    * test predictions and the number of training iterations.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("linner")
    val spark = SparkSession.builder().config(conf).getOrCreate()

    // CSV values are read as strings; they are converted to Double below.
    val file = spark.read.format("csv")
      .option("sep", ";")
      .option("header", "true")
      .load("house.csv")

    import spark.implicits._

    // Random column used only to shuffle the rows before splitting.
    val random = new util.Random()

    val data = file.select("square", "price")
      .map(row => (row.getAs[String](0).toDouble, row.getString(1).toDouble, random.nextDouble()))
      .toDF("square", "price", "random")
      .sort("random")

    /**
      * Assemble the feature column(s) into a single vector column. Source data
      * frequently carries non-feature columns (IDs, descriptions, ...), so the
      * model input is built explicitly from the selected columns.
      * VectorAssembler is a transformer that merges multiple columns into one
      * vector-typed "features" column, the conventional input name for Spark ML.
      */
    val assembler = new VectorAssembler()
      .setInputCols(Array("square"))
      .setOutputCol("features")

    val frame = assembler.transform(data)

    // 80/20 train/test split with a fixed seed for reproducibility.
    val Array(train, test) = frame.randomSplit(Array(0.8, 0.2), 1234L)

    train.show()

    // BUG FIX: the label ("price") is continuous, so this must be a linear
    // regression, not LogisticRegression. LogisticRegression is a classifier
    // and rejects/mishandles non-categorical labels. LinearRegression supports
    // the same standardization, iteration, and elastic-net parameters.
    val lr = new LinearRegression()
      .setStandardization(true)
      .setMaxIter(10)
      .setRegParam(0.3)       // regularization strength (lambda)
      .setElasticNetParam(0.8) // 0.8 => mostly L1 (lasso), some L2 (ridge)

    val model = lr.setLabelCol("price").setFeaturesCol("features").fit(train)

    model.transform(test).show()

    // Training summary is available because the model was just fit.
    println(s"iter: ${model.summary.totalIterations}")

    // Release cluster resources before the JVM exits.
    spark.stop()
  }

}
