package sparkml_study

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature.{IndexToString, StringIndexer, VectorAssembler, VectorIndexer}
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.DoubleType
import org.apache.spark.sql.{Row, SparkSession}

object FenLeiSuanFa {
  /**
   * Demo of a Spark ML classification pipeline (logistic regression):
   * read CSV from HDFS, assemble features, index label/features,
   * train, predict, and print per-row predictions.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("分类算法,这里示范的好像是逻辑斯蒂回归分类器")
      .getOrCreate()

    // Read the raw CSV from HDFS; inferSchema lets Spark guess column types,
    // so the label column's runtime type depends on the data itself.
    val data = spark.read.option("inferSchema", "true")
      .csv("hdfs://192.168.40.110:9000/spark_test_data/test_data.csv")
      .toDF("c0", "c1", "c2", "c3", "label")

    // Normalize the four feature columns to DoubleType; leave label as inferred.
    val dataDouble = data.select(
      col("c0").cast(DoubleType), col("c1").cast(DoubleType),
      col("c2").cast(DoubleType), col("c3").cast(DoubleType),
      col("label")
    )

    // Combine the four feature columns into a single vector column "features".
    val assembler = new VectorAssembler()
      .setInputCols(Array("c0", "c1", "c2", "c3"))
      .setOutputCol("features")

    val assembled = assembler.transform(dataDouble).select("features", "label")

    assembled.show(false)

    // Index the label column (string -> numeric index).
    val labelIndexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("indexedLabel")
      .fit(assembled)

    // Index categorical features inside the assembled vector.
    val featureIndexer = new VectorIndexer()
      .setInputCol("features")
      .setOutputCol("indexedFeatures")
      .fit(assembled)

    // Configure logistic regression: 100 iterations, regParam 0.3, elastic net 0.8.
    // Call explainParams() to list every tunable parameter.
    val lr = new LogisticRegression()
      .setLabelCol("indexedLabel")
      .setFeaturesCol("indexedFeatures")
      .setMaxIter(100)
      .setRegParam(0.3)
      .setElasticNetParam(0.8)

    // Convert numeric predictions back to the original string labels.
    val labelConverter = new IndexToString()
      .setInputCol("prediction")
      .setOutputCol("predictedLabel")
      .setLabels(labelIndexer.labels)

    // Pipeline: each stage's output feeds the next stage's input.
    val lrPipeline = new Pipeline()
      .setStages(Array(labelIndexer, featureIndexer, lr, labelConverter))

    // 70/30 train/test split.
    val Array(training, test) = assembled.randomSplit(Array(0.7, 0.3))
    // A Pipeline is an Estimator; fit() produces the Transformer (PipelineModel).
    val result = lrPipeline.fit(training).transform(test)

    // Print each prediction. Explicit getAs/get extraction replaces the
    // original partial `case Row(...)` match, which would throw MatchError
    // whenever the inferred label type was not String.
    result.select("predictedLabel", "label", "features", "probability")
      .collect()
      .foreach { row =>
        val predictedLabel = row.getAs[String]("predictedLabel")
        val label = row.get(1) // keep whatever type inferSchema produced
        val features = row.getAs[Vector]("features")
        val prob = row.getAs[Vector]("probability")
        println(s"($label,$features) --> prob=$prob,predictedLabel=$predictedLabel")
      }

    spark.close()
  }

}
