package com.lvmama.monkey.buy

import com.lvmama.monkey.common.logging.LazyLogging
import com.lvmama.monkey.common.utils.JDBCUtils.JDBCTemplate
import com.lvmama.monkey.common.utils.spark.SparkApplication
import com.lvmama.monkey.config.JobConfig
import com.lvmama.monkey.common.utils.Conversion._
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.DecisionTree
import org.apache.spark.mllib.tree.model.DecisionTreeModel
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import com.lvmama.monkey.common.utils.JDBCUtils.ConnetionPool._


/**
  * Purchasing-power prediction job.
  *
  * Trains a decision-tree classifier on pre-extracted product features
  * (CSV lines on HDFS whose last column is a 1-based class label), scores a
  * separate prediction data set with the trained model, and writes the
  * results into the MySQL table `pro_predict`.
  *
  * Created by hejing on 2017/10/31.
  */
class PurchasingPower(config: JobConfig) extends SparkApplication with LazyLogging{
  override var appName: String = "PurchasingPower"
  override var sparkConfig: Map[String, String] = config.spark
  // Shared JDBC defaults / connection properties from the project template.
  val JDBCDefault = JDBCTemplate.JDBCDefaultSet
  val connP = JDBCTemplate.getConProperties

  def execute(): Unit = {
    sparkConfig += ("spark.app.name" -> appName)
    sparkConfig += ("spark.master" -> "local[*]")
    //    sparkConfig+=("spark.default.parallelism" -> "40")
    //    sparkConfig +=("spark.sql.shuffle.partitions" -> "300" )
    withSparkContext {
      sc =>
        val sqlContext = new SQLContext(sc)
        import sqlContext.implicits._

        // Parse the training file ONCE and cache it: the original read and
        // re-parsed the same HDFS glob twice (once for numClasses, once for
        // the labeled points).
        val parsed = sc.textFile("hdfs://hadoop/user/monkey/Purchasing/PurchasingTrainDataResult/part-*")
          .map(_.split(',').map(_.toDouble))
          .cache()

        // Number of target classes. Labels are 1-based in the file; kept as
        // max+1 to match the original sizing (labels are shifted to 0-based
        // below, so this leaves one spare class slot — preserved as-is).
        val numClasses = parsed.map(_.last).max().toInt + 1

        val rdata = parsed.map { values =>
          val featureVector = Vectors.dense(values.init)
          val label = values.last - 1 // MLlib wants 0-based labels
          LabeledPoint(label, featureVector)
        }

        //选择训练集、交叉检验集、测试集 (train / cross-validation / test split)
        val Array(trainData, cvData, testData) = rdata.randomSplit(Array(0.8, 0.1, 0.1))
        buildDecisionTree(trainData, cvData, testData)
        //      evaluate(trainData, cvData)           // hyper-parameter search (model evaluation)

        /**
          * Builds the decision-tree model, scores the prediction data set and
          * persists the predictions to MySQL.
          *
          * The hyper-parameters (entropy, depth 20, 10 bins) were chosen via
          * the manual grid search in [[evaluate]].
          *
          * @param trainData labeled points used to fit the model
          * @param cvData    labeled points used for hold-out metrics
          * @param testData  currently unused; reserved for a final test pass
          */
        def buildDecisionTree(trainData: RDD[LabeledPoint], cvData: RDD[LabeledPoint], testData: RDD[LabeledPoint]) = {
          // Multiclass metrics (confusion matrix etc.) for `model` on `data`.
          def getMetrics(model: DecisionTreeModel, data: RDD[LabeledPoint]) = {
            val predictionsAndLabels = data.map {
              example =>
                (model.predict(example.features), example.label)
            }
            new MulticlassMetrics(predictionsAndLabels)
          }

          val model = DecisionTree.trainClassifier(
            trainData, numClasses, Map[Int, Int](), "entropy", 20, 10
          )
          val metrics = getMetrics(model, cvData) // fixed typo: was `matrics`
          //println(metrics.confusionMatrix)

          // Each prediction line is a feature-only CSV:
          // product_id,date,week,sense_type,good_comment
          val selectDateTemp2 = sc.textFile("hdfs://hadoop/user/monkey/Purchasing/predictDataResult/part-*")
          val rdata2 = selectDateTemp2.map { line =>
            val values = line.split(',').map(_.toDouble)
            val featureVector = Vectors.dense(values)
            (line, model.predict(featureVector))
          }
            .map { case (line, predicted) =>
              // Split once per row (the original split the line 5 times).
              val cols = line.split(",")
              (cols(0).toLong, cols(1), cols(2).toInt, cols(3).toDouble, cols(4).toDouble, predicted)
            }.toDF("product_id", "date", "week", "sense_type", "good_comment", "sales_num")

          // BUG FIX: `truncate from` is not valid SQL — the statement is
          // TRUNCATE TABLE. With the broken statement the table was never
          // cleared, so the Append insert below duplicated rows on reruns.
          val sql = "truncate table pro_predict"
          upsertDF2Mysql(rdata2, sql)
          rdata2.insertDF2Mysql("pro_predict", "monkey", SaveMode.Append)
        }

        /**
          * 模型评估 — manual hyper-parameter grid search.
          *
          * Trains a model for every (impurity, depth, bins) combination and
          * prints the parameter tuples sorted by accuracy, best first.
          *
          * @param trainData 训练数据 (training data)
          * @param cvData    交叉验证数据 (cross-validation data)
          */
        def evaluate(trainData: RDD[LabeledPoint], cvData: RDD[LabeledPoint]): Unit = {
          val evaluations =
            for (impurity <- Array("gini", "entropy");
                 depth <- Array(1, 20);
                 bins <- Array(10, 300))
              yield {
                val model = DecisionTree.trainClassifier(
                  trainData, numClasses, Map[Int, Int](), impurity, depth, bins)
                val predictionsAndLabels = cvData.map(example =>
                  (model.predict(example.features), example.label)
                )
                // no-arg `precision` is the overall accuracy for multiclass
                val accuracy =
                  new MulticlassMetrics(predictionsAndLabels).precision
                ((impurity, depth, bins), accuracy)
              }
          evaluations.sortBy(_._2).reverse.foreach(println)
        }
    }
  }
}

/** Companion entry point: builds a [[PurchasingPower]] job and runs it. */
object PurchasingPower {
  val config = JobConfig()

  /** Constructs the job with the shared config and executes it. */
  def apply(): Unit = {
    val job = new PurchasingPower(config)
    job.execute()
  }
}
