package com.xinqing.bigdata.ml.piepline

import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.{SparkConf}


/**
  * @author CHQ
  * @since 2020/8/5 14:22
  * Demonstrates Spark ML parameter configuration on a LogisticRegression
  * Estimator, both via setter methods and via ParamMap overrides.
  */
object PipelineComponentExample {

  def main(args: Array[String]): Unit = {

    // Create the SparkSession (which wraps the SparkContext).
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("spark sql test1")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    try {
      // Training data as (label, feature-vector) rows with named columns.
      val training: DataFrame = sparkSession.createDataFrame(Seq(
        (1.0, Vectors.dense(0.0, 1.1, 0.1)),
        (0.0, Vectors.dense(2.0, 1.1, -1.0)),
        (0.0, Vectors.dense(2.0, 1.3, 1.0)),
        (1.0, Vectors.dense(0.0, 1.2, -0.5))
      )).toDF("label", "features")

      val lr: LogisticRegression = new LogisticRegression()

      /* LogisticRegression params (from explainParams):
       aggregationDepth: suggested depth for treeAggregate (>= 2) (default: 2)
       elasticNetParam: the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty (default: 0.0)
       family: The name of family which is a description of the label distribution to be used in the model. Supported options: auto, binomial, multinomial. (default: auto)
       featuresCol: features column name (default: features)
       fitIntercept: whether to fit an intercept term (default: true)
       labelCol: label column name (default: label)
       maxIter: maximum number of iterations (>= 0) (default: 100)
       predictionCol: prediction column name (default: prediction)
       probabilityCol: Column name for predicted class conditional probabilities (default: probability)
       rawPredictionCol: raw prediction (a.k.a. confidence) column name (default: rawPrediction)
       regParam: regularization parameter (>= 0) (default: 0.0)
       standardization: whether to standardize the training features before fitting the model (default: true)
       threshold: threshold in binary classification prediction, in range [0, 1] (default: 0.5)
       thresholds: Thresholds in multi-class classification to adjust the probability of predicting each class (undefined)
       tol: the convergence tolerance for iterative algorithms (>= 0) (default: 1.0E-6)
       weightCol: weight column name. If this is not set or empty, we treat all instance weights as 1.0 (undefined)
       */
      // Print the estimator's parameters (docs and current/default values).
      println("LogisticRegression params:" + lr.explainParams())

      // First way to configure params: setter methods on the Estimator.
      lr.setMaxIter(10).setRegParam(0.01)

      // Fit a baseline model with the setter-configured parameters.
      // (A DataFrame is a Dataset[Row]; each row has type Row.)
      val model: LogisticRegressionModel = lr.fit(training)
      // Show the parameters the baseline model was actually trained with.
      println("Model 1 was fit using parameters: " + model.parent.extractParamMap)

      // Second way to configure params: a ParamMap passed to fit().
      // These values override whatever was set via the setters above.
      val paramMap1: ParamMap = ParamMap(
        lr.maxIter -> 80,
        lr.regParam -> 0.1,
        lr.threshold -> 0.55
      )
      // Rename the probability output column for this fit only.
      val paramMap2: ParamMap = ParamMap(lr.probabilityCol -> "myProbability")
      // ParamMaps compose with ++; later entries win on conflict.
      val paramMap: ParamMap = paramMap1 ++ paramMap2

      // Fit the model used for prediction, with the ParamMap overrides applied.
      val regressionModel: LogisticRegressionModel = lr.fit(training, paramMap)

      // Test data: same (label, features) schema as the training set.
      val test: DataFrame = sparkSession.createDataFrame(Seq(
        (1.0, Vectors.dense(-1.0, 1.5, 1.3)),
        (0.0, Vectors.dense(3.0, 2.0, -0.1)),
        (1.0, Vectors.dense(0.0, 2.2, -1.5))
      )).toDF("label", "features")

      // Transform adds "myProbability" (renamed via paramMap2) and "prediction"
      // columns; collect to the driver and pattern-match each Row.
      regressionModel.transform(test)
        .select("features", "label", "myProbability", "prediction")
        .collect()
        .foreach {
          case Row(features: Vector, label: Double, prob: Vector, prediction: Double) =>
            println(s"($label,$features) -> $prob,$prediction")
        }
      // Expected output (approximately):
      // (1.0,[-1.0,1.5,1.3]) -> [0.12057154405357391,0.879428455946426],1.0
      // (0.0,[3.0,2.0,-0.1]) -> [0.9923178731504473,0.00768212684955266],0.0
      // (1.0,[0.0,2.2,-1.5]) -> [0.7486054956351106,0.25139450436488947],0.0
    } finally {
      // Always release the SparkContext and its resources.
      sparkSession.stop()
    }
  }
}
