//package Sun
//
///**
// * Created by SHANGMAI on 2016/10/12.
// */
//
//
//import BLAS._
//import breeze.linalg.SparseVector
////import breeze.linalg.{DenseVector => BDV, SparseVector, DenseVector, Vector}
//import org.apache.spark.mllib.linalg.{SparseVector, DenseVector, Vector, Vectors}
//
//import org.apache.spark.mllib.optimization.Gradient
//import org.apache.spark.mllib.optimization.LBFGS
//
//import org.apache.spark.mllib.optimization.Optimizer
//import org.apache.spark.mllib.optimization.Updater
//import org.apache.spark.rdd.RDD
//
//import scala.collection.mutable
//import scala.collection.mutable.ArrayBuffer
//import breeze.optimize.{CachedDiffFunction, DiffFunction, LBFGS => BreezeLBFGS}
//
//import org.apache.spark.Logging
//import org.apache.spark.annotation.DeveloperApi
//import org.apache.spark.rdd.RDD
//import org.apache.spark.mllib.optimization._
//
//
//import java.util
//import java.lang.{Double => JavaDouble, Integer => JavaInteger, Iterable => JavaIterable}
//
//import scala.annotation.varargs
//import scala.collection.JavaConverters._
//
//import breeze.linalg.{DenseVector => BDV, SparseVector => BSV, Vector => BV}
//
//import org.apache.spark.SparkException
//import org.apache.spark.annotation.DeveloperApi
//import org.apache.spark.mllib.util.NumericParser
//import org.apache.spark.sql.Row
//import org.apache.spark.sql.catalyst.expressions.GenericMutableRow
//import org.apache.spark.sql.types._
//
///**
// * :: DeveloperApi ::
// * Top-level method to run L-BFGS.
// */
//@DeveloperApi
//object MyLBFGS extends Logging {
//  /**
//   * Run Limited-memory BFGS (L-BFGS) in parallel.
//   * Averaging the subgradients over different partitions is performed using one standard
//   * spark map-reduce in each iteration.
//   *
//   * @param data - Input data for L-BFGS. RDD of the set of data examples, each of
//   *               the form (label, [feature values]).
//   * @param gradient - Gradient object (used to compute the gradient of the loss function of
//   *                   one single data example)
//   * @param updater - Updater function to actually perform a gradient step in a given direction.
//   * @param numCorrections - The number of corrections used in the L-BFGS update.
//   * @param convergenceTol - The convergence tolerance of iterations for L-BFGS which must be
//   *                         nonnegative. Lower values are less tolerant and therefore generally
//   *                         cause more iterations to be run.
//   * @param maxNumIterations - Maximal number of iterations that L-BFGS can be run.
//   * @param regParam - Regularization parameter
//   *
//   * @return A tuple containing two elements. The first element is a column matrix containing
//   *         weights for every feature, and the second element is an array containing the loss
//   *         computed for every iteration.
//   */
//  def runLBFGS(
//                data: RDD[(Double, Vector)],
//                gradient: Gradient,
//                updater: Updater,
//                numCorrections: Int,
//                convergenceTol: Double,
//                maxNumIterations: Int,
//                regParam: Double,
//                initialWeights: Vector): (Vector, Array[Double]) = {
//
//    val lossHistory = mutable.ArrayBuilder.make[Double]
//
//    val numExamples = data.count()
//
//    val costFun =
//      new CostFun(data, gradient, updater, regParam, numExamples)
//
//    val lbfgs = new BreezeLBFGS[BDV[Double]](maxNumIterations, numCorrections, convergenceTol)
//
//    val states =
//      lbfgs.iterations(new CachedDiffFunction(costFun), initialWeights.toBreeze.toDenseVector)
//
//    /**
//     * NOTE: lossSum and loss are computed using the weights from the previous iteration
//     * and regVal is the regularization value computed in the previous iteration as well.
//     */
//    var state = states.next()
//    while (states.hasNext) {
//      lossHistory += state.value
//      state = states.next()
//    }
//    lossHistory += state.value
//    val weights = fromBreeze(state.x)
//
//    val lossHistoryArray = lossHistory.result()
//
//    logInfo("LBFGS.runLBFGS finished. Last 10 losses %s".format(
//      lossHistoryArray.takeRight(10).mkString(", ")))
//
//    (weights, lossHistoryArray)
//  }
//
//  /**
//   * CostFun implements Breeze's DiffFunction[T], which returns the loss and gradient
//   * at a particular point (weights). It's used in Breeze's convex optimization routines.
//   */
//  private class CostFun(
//                         data: RDD[(Double, Vector)],
//                         gradient: Gradient,
//                         updater: Updater,
//                         regParam: Double,
//                         numExamples: Long) extends DiffFunction[BDV[Double]] {
//
//    override def calculate(weights: BDV[Double]): (Double, BDV[Double]) = {
//      // Have a local copy to avoid the serialization of CostFun object which is not serializable.
//      val w = fromBreeze(weights)
//      val n = w.size
//      val bcW = data.context.broadcast(w)
//      val localGradient = gradient
//
//      val (gradientSum, lossSum) = data.treeAggregate((Vectors.zeros(n), 0.0))(
//        seqOp = (c, v) => (c, v) match { case ((grad, loss), (label, features)) =>
//          val l = localGradient.compute(
//            features, label, bcW.value, grad)
//          (grad, loss + l)
//        },
//        combOp = (c1, c2) => (c1, c2) match { case ((grad1, loss1), (grad2, loss2)) =>
//          axpy(1.0, grad2, grad1)
//          (grad1, loss1 + loss2)
//        })
//
//      /**
//       * regVal is sum of weight squares if it's L2 updater;
//       * for other updaters, the same logic is followed.
//       */
//      val regVal = updater.compute(w, Vectors.zeros(n), 0, 1, regParam)._2
//
//      val loss = lossSum / numExamples + regVal
//      /**
//       * It will return the gradient part of regularization using updater.
//       *
//       * Given the input parameters, the updater basically does the following,
//       *
//       * w' = w - thisIterStepSize * (gradient + regGradient(w))
//       * Note that regGradient is function of w
//       *
//       * If we set gradient = 0, thisIterStepSize = 1, then
//       *
//       * regGradient(w) = w - w'
//       *
//       * TODO: We need to clean it up by separating the logic of regularization out
//       *       from updater to regularizer.
//       */
//      // The following gradientTotal is actually the regularization part of gradient.
//      // Will add the gradientSum computed from the data with weights in the next step.
//      val gradientTotal = w.copy
//      axpy(-1.0, updater.compute(w, Vectors.zeros(n), 1, 1, regParam)._1, gradientTotal)
//
//      // gradientTotal = gradientSum / numExamples + gradientTotal
//      axpy(1.0 / numExamples, gradientSum, gradientTotal)
//
//      (loss, gradientTotal.toBreeze.asInstanceOf[BDV[Double]])
//    }
//
//
//    /**
//     * Creates a vector instance from a breeze vector.
//     */
//    def fromBreeze(breezeVector: BV[Double]): Vector = {
//      breezeVector match {
//        case v: BDV[Double] =>
//          if (v.offset == 0 && v.stride == 1 && v.length == v.data.length) {
//            new DenseVector(v.data)
//          } else {
//            new DenseVector(v.toArray)  // Can't use underlying array directly, so make a new one
//          }
//        case v: BSV[Double] =>
//          if (v.index.length == v.used) {
//            new SparseVector(v.length, v.index, v.data)
//          } else {
//            new SparseVector(v.length, v.index.slice(0, v.used), v.data.slice(0, v.used))
//          }
//        case v: BV[_] =>
//          sys.error("Unsupported Breeze vector type: " + v.getClass.getName)
//      }
//    }
//  }
//}
