package Sun


//import AdDemo.MyGradient

import java.io.{File, PrintWriter}
import java.util

import AdDemo.MyGradient
import Sun.{BasicFunction, Separate}
import breeze.optimize.StochasticGradientDescent

import org.apache.spark.mllib.classification.LogisticRegressionModel
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
import org.apache.spark.mllib.linalg.{DenseVector, Vectors, Vector}
import org.apache.spark.mllib.optimization._
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
import Array._
import scala.io.Source

/**
 * Created by Sun on 2016/6/2.
 */
object MutiTouchTrain extends Serializable{

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setAppName("MutiTouch").set("spark.hadoop.validateOutputSpecs", "false")
    val sc = new SparkContext(sparkConf)

    // args(0): base working directory; args(1): run mode ("StandAlone" or cluster mode).
    val path = sc.broadcast(args(0))
    val mode = sc.broadcast(args(1))

    BasicFunction.basicpath = path.value
    BasicFunction.mode = mode.value

    // Build the labeled train / test sets (positives plus down-sampled negatives).
    val (train, test) = prepareData(sc)

    // Keep 90% of the training set for fitting; fixed seed for reproducibility.
    val splits = train.randomSplit(Array(0.9, 0.1), seed = 11L)

    // Append 1 to every feature vector so the model learns an intercept term.
    val traindata = splits(0).map(x => (x.label, MLUtils.appendBias(x.features)))

    // Train from scratch (null => zero initial weights inside training()).
    // training() persists the model and loss as a side effect, so the returned
    // pair is not needed here.
    training(sc, traindata, null)

    // Evaluate on both the full training set and the held-out test set.
    val trainRoc = testSample(sc, train)
    val trainTest = testSample(sc, test)

    // Persist the evaluation metrics to Redis; always return the connection
    // to the pool, even if a set() fails.
    val jedis = RedisClient.pool.getResource
    try {
      jedis.select(BasicFunction.dbIndex)
      jedis.set("trainRoc", trainRoc.toString)
      jedis.set("trainTest", trainTest.toString)
    } finally {
      jedis.close()
    }
  }

  // Trains the model.
  // The second parameter is the training data as (label, feature vector) pairs.
  // The third parameter is the initial weight vector; if null, an all-zero vector is used.
  // Fit the model with L-BFGS and persist both the weights and the loss history.
  //
  // training: labeled samples as (label, feature vector) pairs
  // Weights:  initial weight vector; when null a zero vector of the expected
  //           length is used instead
  // Returns the fitted weight vector and the per-iteration loss values.
  def training(sc:SparkContext,training:RDD[(Double, Vector)],Weights:Vector): (Vector, Array[Double]) =
  {
    // L-BFGS hyper-parameters.
    val numCorrections = 10
    val convergenceTol = 0.000001
    val maxNumIterations = 100
    val regParam = 0.1

    // Weight layout: wc, W1,e .. Wk,e, W1,d .. Wk,d — grouped per channel.
    // A freshly allocated Array[Double] is already zero-filled, so no extra
    // initialisation pass is needed.
    val initialWeights: Vector =
      if (Weights == null)
        Vectors.dense(new Array[Double](
          BasicFunction.featureC_length +
            BasicFunction.channelSize * (BasicFunction.featureD_length + BasicFunction.featureE_length)
        )).toSparse
      else
        Weights

    val (weightsResult, loss) = LBFGS.runLBFGS(training, new MyGradient(), new SquaredL2Updater(),
      numCorrections, convergenceTol, maxNumIterations, regParam, initialWeights)

    loss.foreach(println)

    // Persist the trained weights so testSample() can reload them.
    val model = new MutiTouchModel()
    model.loadModel(weightsResult)
    model.saveModel(BasicFunction.mode)

    // Persist the loss history: local file in stand-alone mode, Redis otherwise.
    if (BasicFunction.mode == "StandAlone") {
      val writer = new PrintWriter(new File(BasicFunction.basicpath + "/loss"))
      // loss is an Array[Double]; loss.toString would only print the JVM array
      // identity ("[D@..."), so serialise the actual values instead.
      try writer.write(loss.mkString(","))
      finally writer.close()
    } else {
      val jedis = RedisClient.pool.getResource
      try {
        jedis.select(BasicFunction.dbIndex)
        jedis.set("LossList", loss.mkString(","))
      } finally {
        jedis.close()
      }
    }

    (weightsResult, loss)
  }

  // Evaluates the model on the given sample set.
  // Computes metrics such as the ROC area for the samples.
  // Score the given samples with the persisted model and compute the area
  // under the ROC curve.
  //
  // Returns the AUC. (The original signature returned Unit, which made the
  // caller in main() store "()" in Redis instead of the metric; callers that
  // ignore the result are unaffected.)
  def testSample(sc:SparkContext,test:RDD[LabeledPoint]): Double =
  {
    // Reload the model saved by training() and ship it to the executors.
    val model = new MutiTouchModel()
    model.loadModel(BasicFunction.mode)
    val modelboard = sc.broadcast(model)

    // Raw model score for each point, paired with its true label.
    val scoreAndLabels = test.map { point =>
      (modelboard.value.predict(point.features), point.label)
    }

    val metrics = new BinaryClassificationMetrics(scoreAndLabels)

    // Area under the ROC curve.
    val auROC = metrics.areaUnderROC()
    println("Area under ROC = " + auROC)
    auROC
  }

  // Prepares the data used for training and testing.
  // Build the (train, test) sets from the persisted positive and negative samples.
  // Positives and negatives live at different paths and are loaded separately.
  // Negatives are down-sampled to roughly 100x the positive count; both sampling
  // rates are clamped to 1.0 so randomSplit never receives a negative weight.
  // Either side may be absent (null path), in which case the other side alone
  // is returned — matching the original null-propagating behavior.
  def prepareData(sc:SparkContext): (RDD[LabeledPoint],RDD[LabeledPoint]) =
  {
    var positiveCount = 0
    var positiveTestCount = 0
    var positive_data: RDD[LabeledPoint] = null
    var positive_data_test: RDD[LabeledPoint] = null

    // Positives: label 1; half of them for training, half for testing.
    if (BasicFunction.sample_positive_save_path != null) {
      val loadpath = BasicFunction.basicloadpath(BasicFunction.sample_positive_save_path)
      val allPositives = ETL.loadSampleData(sc, loadpath)
        .map(x => LabeledPoint(1, Vectors.dense(x._2).toSparse))
      val splitData = allPositives.randomSplit(Array(0.5, 0.5), seed = 11L)
      positive_data = splitData(0)
      positive_data_test = splitData(1)

      positiveCount = positive_data.count().toInt
      positiveTestCount = positive_data_test.count().toInt
    }

    var negative_data: RDD[LabeledPoint] = null
    var negative_data_test: RDD[LabeledPoint] = null

    if (BasicFunction.sample_negative_save_path != null) {
      val loadpath = BasicFunction.basicloadpath(BasicFunction.sample_negative_save_path)
      negative_data = ETL.loadSampleData(sc, loadpath)
        .map(x => LabeledPoint(0, Vectors.dense(x._2).toSparse))

      // Down-sample negatives relative to the positive count (target ~100:1);
      // the same rule applies to both the training and the test portion.
      if (positiveCount != 0) {
        // min(..., 1.0) keeps (rate, 1 - rate) a valid weight pair when
        // positives exceed 1% of the negatives.
        val rate = math.min(positiveCount.toDouble / negative_data.count().toDouble * 100, 1.0)
        val splitData = negative_data.randomSplit(Array(rate, 1 - rate), seed = 11L)
        negative_data = splitData(0)

        val rate2 = math.min(positiveTestCount.toDouble / splitData(1).count().toDouble * 100, 1.0)
        negative_data_test = splitData(1).randomSplit(Array(rate2, 1 - rate2), seed = 11L)(0)
      }
    }

    // Assemble the final (train, test) pair from whichever sides are present.
    if (positive_data != null && negative_data != null)
      (positive_data.union(negative_data), positive_data_test.union(negative_data_test))
    else if (positive_data != null)
      (positive_data, positive_data_test)
    else
      (negative_data, negative_data_test)
  }


}
