package com.bigdata.spark.ml
import org.apache.spark.sql.Row
import org.apache.spark.sql.SparkSession
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.feature.{HashingTF, IndexToString, StringIndexer, Tokenizer, VectorIndexer}
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.classification.LogisticRegressionModel
import org.apache.spark.ml.classification.{BinaryLogisticRegressionSummary, LogisticRegression}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions
/**
 * @author Gerry chan
 * @version 1.0
 * 2021/01/05 15:01
 * 逻辑斯蒂回归分类器。使用Iris数据集
 * 教程地址：https://www.bilibili.com/video/BV1Yt411A7o6?p=7
 *
 */
object LogisticRegressionIris {

  /**
   * Trains a binary logistic regression classifier on the Iris dataset.
   *
   * The "Iris-setosa" class is filtered out so that only two classes remain,
   * making the problem binary. The program prints each test prediction, the
   * overall test error, and the fitted model's coefficients.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("LogisticRegressionIris")
      .getOrCreate()

    try {
      import spark.implicits._

      // Load the raw comma-separated Iris data: 4 numeric features + a string label.
      val data = spark.sparkContext.textFile("datas/iris.data.txt")
        .map(_.split(","))
        .map(p => Iris(
          Vectors.dense(p(0).toDouble, p(1).toDouble, p(2).toDouble, p(3).toDouble),
          p(4))) // p(4) is already a String; no .toString() needed
        .toDF()

      // Uncomment to inspect the loaded frame:
      //data.show()

      // Register a temp view and keep only two classes so the task is binary.
      data.createOrReplaceTempView("iris")
      val df = spark.sql("select * from iris where label!='Iris-setosa'")

      // Quick inspection: print each row as "label:features".
      df.map(t => t(1) + ":" + t(0)).collect().foreach(println)

      // 1) Index the string label column and the feature vector column.
      val labelIndexer = new StringIndexer()
        .setInputCol("label")
        .setOutputCol("indexedLabel")
        .fit(df)

      val featureIndexer = new VectorIndexer()
        .setInputCol("features")
        .setOutputCol("indexedFeatures")
        .fit(df)

      // 2) Random split: 70% of the data for training, 30% for testing.
      //    (The original comment had these proportions swapped.)
      val Array(trainingData, testData) = df.randomSplit(Array(0.7, 0.3))

      // 3) Configure the logistic regression estimator.
      val lr = new LogisticRegression()
        .setLabelCol("indexedLabel")
        .setFeaturesCol("indexedFeatures")
        .setMaxIter(10)
        .setRegParam(0.3)
        .setElasticNetParam(0.8)

      // 4) Map numeric predictions back to the original string labels.
      val labelConverter = new IndexToString()
        .setInputCol("prediction")
        .setOutputCol("predictedLabel")
        .setLabels(labelIndexer.labels)

      // 5) Assemble the pipeline (index -> index -> classify -> un-index) and train.
      val lrPipeline = new Pipeline()
        .setStages(Array(labelIndexer, featureIndexer, lr, labelConverter))
      val lrPipelineModel = lrPipeline.fit(trainingData)

      // 6) Run the fitted pipeline on the held-out test set.
      val lrPredictions = lrPipelineModel.transform(testData)

      // 7) Print each prediction.
      //    predictedLabel: predicted class (string)
      //    label:          true class
      //    features:       input feature vector
      //    probability:    per-class probability vector
      lrPredictions.select("predictedLabel", "label", "features", "probability")
        .collect()
        .foreach {
          case Row(predictedLabel: String, label: String, features: Vector, prob: Vector) =>
            println(s"($label, $features) --> prob=$prob,predictedLabel=$predictedLabel")
        }

      // Evaluate accuracy on the test set and report the error rate.
      val evaluator = new MulticlassClassificationEvaluator()
        .setLabelCol("indexedLabel")
        .setPredictionCol("prediction")
      val lrAccuracy = evaluator.evaluate(lrPredictions)
      println("Test Error=" + (1.0 - lrAccuracy))

      // Extract the trained LogisticRegressionModel (pipeline stage index 2).
      // PipelineModel.stages is typed as Transformer, so the cast is required here.
      val lrModel = lrPipelineModel.stages(2)
        .asInstanceOf[LogisticRegressionModel]

      println("Coefficients: " + lrModel.coefficients + " Intercept:"
        + lrModel.intercept + " numClasses: " + lrModel.numClasses
        + " numFeatures:" + lrModel.numFeatures)
    } finally {
      // Always release the SparkSession, even if the job throws.
      spark.stop()
    }
  }

  // Keep the case class outside main: defining it in the same method that calls
  // toDF() breaks the implicit Encoder derivation, so the conversion would fail.
  case class Iris(features: org.apache.spark.ml.linalg.Vector, label: String)
}
