package sparkml_study

import org.apache.spark.sql.SparkSession
import org.apache.spark.ml.classification.DecisionTreeClassificationModel
import org.apache.spark.ml.classification.DecisionTreeClassifier
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.feature.{IndexToString, StringIndexer, VectorAssembler, VectorIndexer}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.types.DoubleType

object JueCeShu {
  /**
   * Entry point: trains a decision-tree classifier on a 4-feature CSV
   * (Iris-style data, no header) stored in HDFS, shows predictions on a
   * held-out test set, and reports test-set accuracy.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("决策树分类器")
      .getOrCreate()

    // Read the raw CSV; with no header the columns are auto-named _c0.._c4
    // and types are inferred. _c0.._c3 are the features, _c4 is the label.
    val data = spark.read.option("inferSchema", "true")
      .csv("hdfs://192.168.40.110:9000/spark_test_data/test_data.csv")

    // Assemble the four feature columns into a single vector column.
    // (Renamed from `VectorAssembler`, which shadowed the imported class.)
    val assembler = new VectorAssembler()
      .setInputCols(Array("_c0", "_c1", "_c2", "_c3"))
      .setOutputCol("vector")

    val vectorData = assembler.transform(data)

    // Keep only the columns the pipeline needs, under the conventional
    // Spark ML names "features" and "label".
    val df = vectorData.select("vector", "_c4")
      .withColumnRenamed("_c4", "label")
      .withColumnRenamed("vector", "features")

    // Index the string labels into numeric indices. Fit on the full
    // dataset so every label value appearing in the test split is covered.
    val labelIndexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("indexedLabel")
      .fit(df)

    // Automatically identify categorical features (<= 4 distinct values)
    // and index them; continuous features pass through unchanged.
    val featureIndexer = new VectorIndexer()
      .setInputCol("features")
      .setOutputCol("indexedFeatures")
      .setMaxCategories(4)
      .fit(df)

    // Map numeric predictions back to the original string labels.
    val labelConverter = new IndexToString()
      .setInputCol("prediction")
      .setOutputCol("predictedLabel")
      .setLabels(labelIndexer.labels)

    // Random 70/30 train/test split; fixed seed for reproducible runs.
    val Array(trainingData, testData) = df.randomSplit(Array(0.7, 0.3), seed = 1234L)

    // Decision-tree classifier: only the feature and label columns are set
    // here; every other hyper-parameter keeps its default (inspect them
    // with explainParams(), or override via setters / a ParamMap).
    val dtClassifier = new DecisionTreeClassifier()
      .setLabelCol("indexedLabel")
      .setFeaturesCol("indexedFeatures")

    // Chain indexing, classification and label conversion into one pipeline.
    val dtPipeline = new Pipeline()
      .setStages(Array(labelIndexer, featureIndexer, dtClassifier, labelConverter))

    val dtPipelineModel = dtPipeline.fit(trainingData)
    val dtPredictions = dtPipelineModel.transform(testData)

    // Show results: predicted label (converted back to its string form),
    // true label, and the feature vector.
    dtPredictions.select("predictedLabel", "label", "features").show(false)

    // Evaluate accuracy on the held-out test set.
    // FIX: the evaluator was previously constructed but never applied,
    // and no metric name was set.
    val evaluator = new MulticlassClassificationEvaluator()
      .setLabelCol("indexedLabel")      // true label, indexed form
      .setPredictionCol("prediction")   // predicted label, indexed form
      .setMetricName("accuracy")
    val accuracy = evaluator.evaluate(dtPredictions)
    println(s"Test set accuracy = $accuracy")
    println(s"Test error = ${1.0 - accuracy}")

    // Inspect the learned tree: stage 2 of the fitted pipeline is the
    // DecisionTreeClassificationModel (the import was previously unused).
    val treeModel = dtPipelineModel
      .stages(2)
      .asInstanceOf[DecisionTreeClassificationModel]
    println(s"Learned classification tree model:\n${treeModel.toDebugString}")

    spark.close()
  }

}

//  Sample case class definition (Iris), kept for reference
//case class Iris(features:org.apache.spark.ml.linalg.Vector,label:String)
