package com.lqf.ml.tree

import java.time.LocalDate
import java.time.format.DateTimeFormatter

import com.lqf.ml.recommand.AdDataServer
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.GradientBoostedTrees
import org.apache.spark.mllib.tree.configuration.BoostingStrategy
import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.{ArrayBuffer, ListBuffer}

object GBTCtrPredict {

  /**
    * Pairs each raw feature value with its column index.
    *
    * The (columnIndex, value) pair is the key of the feature-id dictionary
    * built in [[main]], so the same raw value appearing in different columns
    * maps to different ids.
    *
    * @param features raw feature values, one per column
    * @return list of (columnIndex, value) pairs, in column order
    */
  def parseFeatures(features: Array[String]): List[(Int, String)] =
    features.toList.zipWithIndex.map(_.swap)

  /**
    * Builds (or reuses) a [[SparkSession]].
    *
    * @param runMode "local" forces a local master; any other value uses `master`
    * @param master  master URL, only read when runMode != "local" (must be non-null then)
    */
  def getSparkSession(runMode: String, master: String): SparkSession = {
    val conf = new SparkConf()
    // NOTE(review): the local app name "alsLocal" looks copy-pasted from an ALS
    // job; kept as-is in case external tooling filters on it — confirm and rename.
    if ("local" == runMode) conf.setMaster("local").setAppName("alsLocal")
    else conf.setMaster(master).setAppName("gbt")
    SparkSession.builder().config(conf).getOrCreate()
  }

  /** Parses a "yyyy-MM-dd" date string into a [[LocalDate]]. */
  def getLocalDate(date: String): LocalDate =
    LocalDate.parse(date, DateTimeFormatter.ofPattern("yyyy-MM-dd"))

  /**
    * Splits one comma-separated sample line into (key, categorical, numerical).
    *
    * Key is "col0::col1" (col1 doubles as the label later). Columns 2..6 are
    * the categorical slice; columns 8..n-2 are the numerical slice. Column 7
    * and the last column are skipped — presumably intentional, TODO confirm.
    */
  private def splitSample(line: String): (String, Array[String], Array[String]) = {
    val tokens = line.split(",")
    val key = tokens(0) + "::" + tokens(1)
    val categorical = tokens.slice(2, 7)
    val numerical = tokens.slice(8, tokens.length - 1)
    (key, categorical, numerical)
  }

  /**
    * Encodes one split sample into a [[LabeledPoint]] using the feature-id
    * dictionary. The label is parsed from the second half of the key.
    *
    * Unknown categorical values fall back to 0.0 — NOTE(review): this collides
    * with dictionary id 0; preserved to keep the original behavior.
    * Negative numerical values are clamped to 0.
    */
  private def encodeSample(key: String,
                           categorical: Array[String],
                           numerical: Array[String],
                           featureIds: scala.collection.Map[(Int, String), Long]): LabeledPoint = {
    val catEncoded = parseFeatures(categorical).map { k =>
      featureIds.get(k).map(_.toDouble).getOrElse(0.0)
    }
    val numEncoded = numerical.map { x =>
      val v = x.toInt
      if (v < 0) 0.0 else v.toDouble
    }
    val label = key.split("::")(1).toInt
    LabeledPoint(label, Vectors.dense((catEncoded ++ numEncoded).toArray))
  }

  def main(args: Array[String]): Unit = {
    val ss = getSparkSession("local", null)
    val sc = ss.sparkContext

    // ---- ad click / impression exploration --------------------------------
    val adDataServer = new AdDataServer(ss, "/alldata")
    val startDate = "2018-01-03"
    val endDate = "2018-01-03"
    val sLocalDate = getLocalDate(startDate)
    val eLocalDate = getLocalDate(endDate)
    val clickCounters = List("ad_adclick", "cp_adclick", "ad_adview", "cp_adview")
    val baseDataSet = adDataServer.getBaseDataSet(sLocalDate, eLocalDate, clickCounters).distinct()
    baseDataSet.persist() // reused by getClickedData below
    val clickDataSet = adDataServer.getClickedData(baseDataSet)
    val impDataSet = adDataServer.getImpressionData(sLocalDate, eLocalDate)
    baseDataSet.unpersist()

    clickDataSet.show()
    impDataSet.show()
    val clickCount = clickDataSet.count()
    val impressionCount = impDataSet.count()

    // Impressions that never led to a click.
    val justViewDataSet = impDataSet.toJavaRDD.subtract(clickDataSet.toJavaRDD)
    justViewDataSet.rdd.foreach(println)
    val viewOnlyCount = justViewDataSet.count()
    println(impressionCount)
    println(viewOnlyCount)
    println(clickCount)

    // WARNING(review): debug early exit — everything below is currently dead code.
    System.exit(1)

    //redis cache trans

    //impData distinct by join

    //union

    // ---- GBT training on the bundled sample file --------------------------
    val data = sc.textFile(this.getClass.getClassLoader.getResource("GB_sample_data.txt").getFile)
    println("total records:" + data.count())

    val Array(trainData, testData) = data.randomSplit(Array(0.8, 0.2), seed = 30L)
    println("train records:" + trainData.count())
    println("test records:" + testData.count())
    trainData.cache()
    testData.cache()

    val trainRDD = trainData.map(splitSample)
    trainRDD.take(1).foreach(println) // was println(take(1)): printed Array.toString, not content

    // Dictionary: every distinct (columnIndex, value) pair -> unique id.
    val featureIds = trainRDD
      .flatMap { case (_, categorical, _) => parseFeatures(categorical) }
      .distinct()
      .zipWithIndex()
      .collectAsMap()
    println("num of feature " + featureIds.size)

    // Encode train data; the dictionary is small enough to ship in the closure.
    val ohTrainRDD = trainRDD.map { case (key, categorical, numerical) =>
      encodeSample(key, categorical, numerical, featureIds)
    }
    ohTrainRDD.foreach(println)

    // WARNING(review): second debug early exit — model training below never runs.
    System.exit(1)

    // ---- train model ------------------------------------------------------
    val boostingStrategy = BoostingStrategy.defaultParams("Classification")
    boostingStrategy.numIterations = 100
    boostingStrategy.treeStrategy.numClasses = 2
    boostingStrategy.treeStrategy.maxDepth = 10
    boostingStrategy.treeStrategy.categoricalFeaturesInfo = Map[Int, Int]()
    val model = GradientBoostedTrees.train(ohTrainRDD, boostingStrategy)

    // ---- evaluate on the held-out split -----------------------------------
    val oheTestRDD = testData
      .map(splitSample)
      .map { case (key, categorical, numerical) =>
        encodeSample(key, categorical, numerical, featureIds)
      }

    val scored = oheTestRDD.map(p => (model.predict(p.features), p.label, p.features))
    scored.take(10).foreach(println)

    val predictions = oheTestRDD.map(lp => model.predict(lp.features))
    predictions.take(10).foreach(println)

    val predictionAndLabel = predictions.zip(oheTestRDD.map(_.label))
    val accuracy = 1.0 * predictionAndLabel.filter(x => x._1 == x._2).count / oheTestRDD.count
    println("GBTR accuracy " + accuracy)
  }
}
