package com.atguigu.userprofile.pipeline

import org.apache.spark.ml.{Pipeline, PipelineModel, Transformer}
import org.apache.spark.ml.classification.{DecisionTreeClassificationModel, DecisionTreeClassifier}
import org.apache.spark.ml.feature.{IndexToString, StringIndexer, StringIndexerModel, VectorAssembler, VectorIndexer}
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame

/**
 * Builder-style wrapper around a Spark ML decision-tree classification pipeline.
 *
 * Pipeline stages (in order):
 *   0. StringIndexer      — label column -> "label_index"
 *   1. VectorAssembler    — feature columns -> "feature_assemble"
 *   2. VectorIndexer      — "feature_assemble" -> "feature_index"
 *   3. DecisionTreeClassifier — "feature_index" -> "prediction_index"
 *
 * Typical usage: configure with the set* methods, then init() -> train() ->
 * predict() / printEvaluate() / saveModel(); or loadModel() -> predict().
 */
class MyPipeline {

  // The assembled (untrained) pipeline; populated by init().
  var pipeline: Pipeline = null

  // The fitted model; populated by train() or loadModel().
  var pipelineModel: PipelineModel = null

  // Name of the raw label column in the input DataFrame.
  var labelColName = ""

  // VectorIndexer threshold: features with <= maxCategories distinct values
  // are treated as categorical, the rest as continuous.
  var maxCategories = 10

  //// Decision-tree hyper-parameters ////////////////////

  // Maximum number of bins used when discretizing continuous features.
  private var maxBins = 20
  // Maximum depth of the tree.
  private var maxDepth = 5
  // Minimum number of instances each child must hold after a split.
  private var minInstancesPerNode = 1
  // Minimum information gain required for a split to be accepted.
  private var minInfoGain = 0.0

  def setMaxBins(maxBins: Int): MyPipeline = {
    this.maxBins = maxBins
    this
  }

  def setMaxDepth(maxDepth: Int): MyPipeline = {
    this.maxDepth = maxDepth
    this
  }

  def setMinInstancesPerNode(minInstancesPerNode: Int): MyPipeline = {
    this.minInstancesPerNode = minInstancesPerNode
    this
  }

  def setMinInfoGain(minInfoGain: Double): MyPipeline = {
    this.minInfoGain = minInfoGain
    this
  }

  def setMaxCategories(maxCategories: Int): MyPipeline = {
    this.maxCategories = maxCategories
    this
  }

  def setLabelColName(labelColName: String): MyPipeline = {
    this.labelColName = labelColName
    this
  }

  // Names of the input feature columns to be assembled into one vector.
  var featureColNames: Array[String] = null

  def setFeatureColName(featureColNames: Array[String]): MyPipeline = {
    this.featureColNames = featureColNames
    this
  }

  // 1. Build the pipeline from its four stages. Call after all setters.
  def init(): MyPipeline = {
    pipeline = new Pipeline().setStages(Array(
      createLabelIndexer(),
      createFeatureAssemble(),
      createFeatureIndexer(),
      createClassifier()
    ))
    this
  }

  // Stage 0: index the label column.
  // Maps raw label values to indices 0, 1, 2, ... ordered by descending
  // frequency (the most frequent label gets index 0).
  def createLabelIndexer(): StringIndexer = {
    new StringIndexer()
      .setInputCol(labelColName)
      .setOutputCol("label_index")
  }

  // Stage 1: assemble the configured feature columns into a single vector.
  def createFeatureAssemble(): VectorAssembler = {
    new VectorAssembler()
      .setInputCols(featureColNames)
      .setOutputCol("feature_assemble")
  }

  // Stage 2: index the assembled feature vector.
  // Features with more than maxCategories distinct values are treated as
  // continuous; the rest are treated as categorical.
  def createFeatureIndexer(): VectorIndexer = {
    new VectorIndexer()
      .setInputCol("feature_assemble")
      .setOutputCol("feature_index")
      .setMaxCategories(maxCategories)
  }

  // Stage 3: the decision-tree classifier, wired to the indexed label and
  // indexed feature columns produced by the earlier stages.
  def createClassifier(): DecisionTreeClassifier = {
    new DecisionTreeClassifier()
      .setLabelCol("label_index")            // input: indexed label
      .setFeaturesCol("feature_index")       // input: indexed feature vector
      .setPredictionCol("prediction_index")  // output: predicted label index
      .setImpurity("gini")
      .setMaxBins(maxBins)
      .setMinInfoGain(minInfoGain)
      .setMaxDepth(maxDepth)                 // BUGFIX: was setMaxDepth(maxBins), ignoring the configured depth
      .setMinInstancesPerNode(minInstancesPerNode)
  }

  // 2. Train: fit the pipeline on the given DataFrame.
  def train(dataFrame: DataFrame): Unit = {
    pipelineModel = pipeline.fit(dataFrame)
  }

  // 3. Predict: run the fitted model over the given DataFrame, appending the
  // stage output columns (including "prediction_index").
  def predict(dataFrame: DataFrame): DataFrame = {
    pipelineModel.transform(dataFrame)
  }

  // Print a textual dump of the learned decision tree (fitted stage 3).
  def printTree(): Unit = {
    val model = pipelineModel.stages(3).asInstanceOf[DecisionTreeClassificationModel]
    println(model.toDebugString)
  }

  // Print the feature-importance vector of the learned tree (fitted stage 3).
  def printFeatureWeight(): Unit = {
    val model = pipelineModel.stages(3).asInstanceOf[DecisionTreeClassificationModel]
    println(model.featureImportances)
  }

  // 4. Evaluate: print overall accuracy plus per-label precision and recall
  // computed from the "prediction_index" / "label_index" columns.
  def printEvaluate(predictedDataFrame: DataFrame): Unit = {
    val predictionAndLabelsRDD: RDD[(Double, Double)] = predictedDataFrame.rdd.map { row =>
      val label = row.getAs[Double]("label_index")
      val prediction = row.getAs[Double]("prediction_index")
      (prediction, label)
    }

    val multiclassMetrics = new MulticlassMetrics(predictionAndLabelsRDD)
    println("总准确率：" + multiclassMetrics.accuracy)
    for (label <- multiclassMetrics.labels) {
      println("label为 " + label + "精确率" + multiclassMetrics.precision(label))
      println("label为 " + label + "召回率" + multiclassMetrics.recall(label))
    }
  }

  // 5. Persist the fitted model, overwriting anything already at savePath.
  def saveModel(savePath: String): Unit = {
    pipelineModel.write.overwrite().save(savePath)
  }

  // 6. Load a previously saved fitted model from savePath.
  def loadModel(savePath: String): MyPipeline = {
    pipelineModel = PipelineModel.load(savePath)
    this
  }

  // 7. Map predicted label indices back to the original label values, using
  // the label list learned by the StringIndexer (fitted stage 0).
  def convertOrigin(dataframe: DataFrame): DataFrame = {
    val stringIndexerModel = pipelineModel.stages(0).asInstanceOf[StringIndexerModel]

    val indexToString = new IndexToString()
      .setInputCol("prediction_index")
      .setOutputCol("prediction_origin")
      .setLabels(stringIndexerModel.labels)

    indexToString.transform(dataframe)
  }

}
