package com.atguigu.userprofile.ml.pipeline

import org.apache.spark.ml.{Pipeline, PipelineModel, Transformer}
import org.apache.spark.ml.classification.{DecisionTreeClassificationModel, DecisionTreeClassifier}
import org.apache.spark.ml.feature.{IndexToString, StringIndexer, StringIndexerModel, VectorAssembler, VectorIndexer}
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame

/**
 * Builder-style wrapper around a Spark ML decision-tree classification pipeline.
 *
 * The pipeline has four stages, created by `init()` in this order:
 *   1. [[StringIndexer]]   — indexes the label column into `label_index`
 *   2. [[VectorAssembler]] — merges feature columns into `feature_assemble`
 *   3. [[VectorIndexer]]   — indexes categorical features into `feature_index`
 *   4. [[DecisionTreeClassifier]] — predicts into `prediction_index`
 *
 * Typical usage: configure via the `setXxx` chain, call `init()`, then
 * `train()` / `predict()` / `printEuvluateReport()` / `saveModel()`.
 */
class MyPipeline {

  //// Hyper-parameters ////////////////////

  // Max distinct values for a feature to be treated as categorical by
  // VectorIndexer; features with more values are treated as continuous.
  private var maxCategories = 20
  // Max number of bins when discretizing continuous features.
  private var maxBins = 5
  // Max depth of the decision tree.
  private var maxDepth = 5
  // Min number of instances each child must have after a split.
  private var minInstancesPerNode = 1
  // Min information gain required for a split.
  private var minInfoGain = 0.0

  /** Name of the raw label column in the input DataFrame. */
  var labelColName = ""

  /** Names of the raw feature columns in the input DataFrame. */
  var featureColName: Array[String] = null

  /** The assembled (un-fitted) pipeline; populated by `init()`. */
  var pipeline: Pipeline = null

  /** The fitted pipeline; populated by `train()` or `loadModel()`. */
  var pipelineModel: PipelineModel = null

  def setMaxBins(maxBins: Int): MyPipeline = {
    this.maxBins = maxBins
    this
  }

  def setMaxDepth(maxDepth: Int): MyPipeline = {
    this.maxDepth = maxDepth
    this
  }

  def setMinInstancesPerNode(minInstancesPerNode: Int): MyPipeline = {
    this.minInstancesPerNode = minInstancesPerNode
    this
  }

  def setMinInfoGain(minInfoGain: Double): MyPipeline = {
    this.minInfoGain = minInfoGain
    this
  }

  def setMaxCategories(maxCategories: Int): MyPipeline = {
    this.maxCategories = maxCategories
    this
  }

  def setLabelColName(labelColName: String): MyPipeline = {
    this.labelColName = labelColName
    this
  }

  def setFeatureColName(featureColName: Array[String]): MyPipeline = {
    this.featureColName = featureColName
    this
  }

  /** Assembles the four pipeline stages. Must be called before `train()`. */
  def init(): MyPipeline = {
    pipeline = new Pipeline().setStages(Array(
      createLabelIndexer(),
      createFeatureAssembler(),
      createFeatureIndexer(),
      createClassifier()))
    this
  }

  /**
   * Stage 1: label indexer.
   *
   * Maps each distinct label value to a double index 0.0, 1.0, 2.0, ...
   * ordered by descending frequency (most frequent label gets index 0).
   * InputCol  = the configured label column; OutputCol = "label_index".
   */
  def createLabelIndexer(): StringIndexer = {
    new StringIndexer()
      .setInputCol(labelColName)
      .setOutputCol("label_index")
  }

  /**
   * Stage 2: feature assembler.
   *
   * Merges the configured feature columns into a single vector column.
   * InputCols = the configured feature columns; OutputCol = "feature_assemble".
   */
  def createFeatureAssembler(): VectorAssembler = {
    new VectorAssembler()
      .setInputCols(featureColName)
      .setOutputCol("feature_assemble")
  }

  /**
   * Stage 3: feature indexer.
   *
   * Indexes categorical features inside the assembled vector; continuous
   * features (more than `maxCategories` distinct values) are left unchanged.
   * InputCol = "feature_assemble"; OutputCol = "feature_index".
   */
  def createFeatureIndexer(): VectorIndexer = {
    new VectorIndexer()
      .setInputCol("feature_assemble")
      .setOutputCol("feature_index")
      .setMaxCategories(maxCategories)
  }

  /**
   * Stage 4: the decision-tree classifier — the stage that actually learns.
   *
   * Reads "feature_index" / "label_index", writes "prediction_index";
   * uses Gini impurity and the configured tree hyper-parameters.
   */
  def createClassifier(): DecisionTreeClassifier = {
    new DecisionTreeClassifier()
      .setFeaturesCol("feature_index")
      .setLabelCol("label_index")
      .setImpurity("gini")
      .setPredictionCol("prediction_index")
      .setMaxBins(maxBins)
      .setMinInfoGain(minInfoGain)
      .setMinInstancesPerNode(minInstancesPerNode)
      .setMaxDepth(maxDepth)
  }

  /** Fits the pipeline on the given DataFrame; stores the fitted model. */
  def train(dataFrame: DataFrame): Unit = {
    pipelineModel = pipeline.fit(dataFrame)
  }

  /**
   * Runs the fitted pipeline on the given DataFrame.
   *
   * @return the input DataFrame with the prediction column(s) appended
   */
  def predict(dataFrame: DataFrame): DataFrame = {
    pipelineModel.transform(dataFrame)
  }

  // Locates the fitted decision-tree stage by type (in this pipeline it is
  // stage 3). Searching by type instead of a hard-coded index keeps the
  // lookup valid for pipelines reloaded from disk or with reordered stages.
  private def decisionTreeModel(): DecisionTreeClassificationModel = {
    pipelineModel.stages.collectFirst {
      case m: DecisionTreeClassificationModel => m
    }.getOrElse(
      throw new IllegalStateException("pipeline has no DecisionTreeClassificationModel stage")
    )
  }

  // Locates the fitted label indexer by type (stage 0 in this pipeline);
  // it holds the index -> original-label mapping used by convertOrgin().
  private def labelIndexerModel(): StringIndexerModel = {
    pipelineModel.stages.collectFirst {
      case m: StringIndexerModel => m
    }.getOrElse(
      throw new IllegalStateException("pipeline has no StringIndexerModel stage")
    )
  }

  /** Prints the learned tree structure for debugging. */
  def printTree(): Unit = {
    println(decisionTreeModel().toDebugString)
  }

  /** Prints the relative importance of each input feature. */
  def printFeatureWeight(): Unit = {
    println(decisionTreeModel().featureImportances)
  }

  /**
   * Prints an evaluation report (accuracy, then per-label precision/recall)
   * for a DataFrame that already contains "label_index" and "prediction_index"
   * columns, i.e. the output of `predict()` on labeled data.
   */
  def printEuvluateReport(dataFrame: DataFrame): Unit = {
    val predictionAndLabelRDD: RDD[(Double, Double)] = dataFrame.rdd.map { row =>
      val labelValue: Double = row.getAs[Double]("label_index")
      val predictionValue: Double = row.getAs[Double]("prediction_index")
      (predictionValue, labelValue)
    }

    val metrics = new MulticlassMetrics(predictionAndLabelRDD)
    println(s"准确率: ${metrics.accuracy}")
    for (label <- metrics.labels) {
      println(s" label 为 ${label}精确率: ${metrics.precision(label)}")
      println(s" label 为 ${label}召回率: ${metrics.recall(label)}")
    }
  }

  /** Saves the fitted model to the given path, overwriting any existing one. */
  def saveModel(path: String): Unit = {
    pipelineModel.write.overwrite().save(path)
  }

  /** Loads a previously saved fitted model from the given path. */
  def loadModel(path: String): MyPipeline = {
    pipelineModel = PipelineModel.load(path)
    this
  }

  /**
   * Converts the numeric "prediction_index" column back to the original label
   * values, using the index -> label mapping learned by the label indexer.
   *
   * @return the input DataFrame with a "prediction_origin" column appended
   */
  def convertOrgin(dataFrame: DataFrame): DataFrame = {
    val indexToString = new IndexToString()
      .setInputCol("prediction_index")
      .setOutputCol("prediction_origin")
      .setLabels(labelIndexerModel().labels)

    indexToString.transform(dataFrame)
  }

}
