package com.etc

import java.util.Random

import org.apache.spark.SparkConf
import org.apache.spark.ml.classification.{DecisionTreeClassifier, NaiveBayes}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.SparkSession


/**
  * SVM classification pipeline for the Iris dataset.
  *
  * Reads `iris.data` as headerless CSV (four string-typed measurement
  * columns plus the species name), converts the species to a numeric
  * label, shuffles the rows via a random column, assembles the four
  * measurements into a `features` vector and splits into train/test
  * sets. The LinearSVC trainer itself is left commented out below:
  * LinearSVC supports only binary classification, so the three-class
  * label would first need the binary filter (also commented out on the
  * DataFrame) re-enabled.
  */
object svm {
  def main(args: Array[String]): Unit = {
    // App name fixed from "linner" (copy-paste leftover) so the job is
    // identifiable in the Spark UI.
    val conf = new SparkConf().setMaster("local[*]").setAppName("svm")
    val spark = SparkSession.builder().config(conf).getOrCreate()

    try {
      // Headerless CSV: columns arrive as strings _c0.._c4.
      val file = spark.read.format("csv").load("iris.data")

      import spark.implicits._
      // java.util.Random is Serializable, so the closure ships to executors.
      val random = new Random()
      val data = file.map(row => {
        // Fail fast with a descriptive error instead of a bare MatchError
        // when the file contains an unexpected or blank species value.
        val label = row.getString(4) match {
          case "Iris-setosa" => 0
          case "Iris-versicolor" => 1
          case "Iris-virginica" => 2
          case other =>
            throw new IllegalArgumentException(s"Unknown iris species: '$other'")
        }

        (row.getString(0).toDouble,
          row.getString(1).toDouble,
          row.getString(2).toDouble,
          row.getString(3).toDouble,
          label,
          random.nextDouble())
      }).toDF("_c0", "_c1", "_c2", "_c3", "label", "rand").sort("rand") //.where("label = 1 or label = 0")

      // Feature assembly: pack the four measurements into one vector column.
      val assembler = new VectorAssembler()
        .setInputCols(Array("_c0", "_c1", "_c2", "_c3"))
        .setOutputCol("features")

      val frame = assembler.transform(data)

      // 80/20 train/test split (seeded randomly each run).
      val Array(train, test) = frame.randomSplit(Array(0.8, 0.2))

      // LinearSVC is binary-only: re-enable the `.where(...)` filter above
      // before uncommenting, or switch to OneVsRest for three classes.
      // val svm = new LinearSVC().setMaxIter(20).setRegParam(0.1)
    } finally {
      // Release the local Spark context even if the pipeline fails.
      spark.stop()
    }
  }
}
