package com.algo.url;

import org.apache.spark._
import org.apache.spark.broadcast
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD

import tw.edu.ntu.csie.liblinear._
/**
 * This class wraps the classifier model from spark-liblinear and supports
 * classification with multiple class labels.
 * <br/> The input format is RDD[DataPoint].
 * <br/> The model wraps the functions below:
 * 
 * <br/> 1. feature normalization
 * <br/> 2. sample balancing
 * <br/> 3. n-fold cross-validation
 * <br/> 4. prediction
 * 
 * @author lujianfeng@miaozhen.com
 * 
 */

object WrappedClassifierModel {

  /** Upper bound on the number of distinct class labels that [[balance]] scans for. */
  val maxClassNum = 10

  /**
   * Over-samples the smaller classes (sampling with replacement) so that every
   * class ends up with approximately as many points as the largest class.
   *
   * Labels are assumed to be consecutive integers starting at 0; the first
   * label with zero samples is taken to mark the end of the label range.
   *
   * @param trainingData labelled points, one class label per DataPoint.y
   * @return the union of the per-class RDDs after over-sampling
   */
  def balance(trainingData: RDD[DataPoint]): RDD[DataPoint] = {
    val list = new Array[RDD[DataPoint]](maxClassNum)
    val sampleNum = new Array[Long](maxClassNum)
    var i = 0
    var notBreak = true
    while (i < maxClassNum && notBreak) {
      list(i) = trainingData.filter(tp => tp.y.toInt == i).cache
      sampleNum(i) = list(i).count
      println("sampNum = " + sampleNum(i))
      if (sampleNum(i) == 0) // the first empty class is regarded as one past the last label
        notBreak = false
      i += 1
    }
    // Highest class index that actually has samples.
    // If the scan stopped at an empty class, `i` was advanced once for that
    // empty class and once more by the final increment, hence i - 2. If all
    // maxClassNum classes were populated, the loop leaves i == maxClassNum and
    // the highest index is i - 1. (The original unconditional `i - 2`
    // under-counted by one in that second case, silently dropping a class.)
    val numClasses = if (notBreak) i - 1 else i - 2
    val maxSampleNum = sampleNum.max
    println("sample class number = " + numClasses)
    for (i <- 0 to numClasses) {
      // With-replacement `sample` treats the fraction as the expected number of
      // draws per element, so this yields ~ (maxSampleNum - sampleNum(i)) extras.
      val sampFraction = (maxSampleNum - sampleNum(i)).toDouble / sampleNum(i).toDouble
      println("sampleNum = " + sampFraction)
      val moreSamples = list(i).sample(true, sampFraction, System.currentTimeMillis())
      println("moreSamp = " + moreSamples.count)
      list(i) = list(i).union(moreSamples)
      println("moreSamp = " + list(i).count)
    }
    val sc = trainingData.sparkContext
    sc.union(list.take(numClasses + 1))
  }

  /**
   * L1-normalizes each point's feature values in place: every value is divided
   * by the sum of that point's values. Points whose values sum to zero are
   * left untouched to avoid division by zero.
   *
   * NOTE(review): the original body computed `elem / sum` inside `foreach` and
   * discarded the result, so normalization was a no-op; the array is now
   * updated in place. Mutation through RDD.foreach only persists when the
   * underlying partitions are cached (as they are after `balance`) — confirm
   * callers always pass cached data, otherwise the side effect is recomputed away.
   */
  def normalize(trainingData: RDD[DataPoint]): Unit = {
    trainingData.foreach { point =>
      val sum = point.value.sum
      if (sum != 0) {
        var j = 0
        while (j < point.value.length) {
          point.value(j) = point.value(j) / sum
          j += 1
        }
      }
    }
  }

  /**
   * Estimates model accuracy by repeated random sub-sampling validation.
   *
   * For simplicity this is not classical n-fold cross-validation: instead of
   * partitioning the data once into n disjoint folds, it draws nFold
   * independent random (nFold-1)/nFold vs 1/nFold train/test splits and
   * averages the test accuracy over the rounds.
   *
   * @param trainingData labelled points
   * @param param        liblinear parameter string passed to SparkLiblinear.train
   * @param nFold        number of random splits; must be positive
   * @return mean test accuracy over the nFold rounds
   */
  def validate(trainingData: RDD[DataPoint], param: String, nFold: Int): Double = {
    require(nFold > 0, "nFold must be positive") // original returned NaN for nFold <= 0
    val result = for (i <- 1 to nFold) yield {
      val splits = trainingData.randomSplit(Array(1 - 1.0 / nFold, 1.0 / nFold))
      val training = splits(0).cache
      val test = splits(1).cache
      val model = SparkLiblinear.train(training, param)
      val labelAndPreds = test.map { point =>
        val prediction = model.predict(point)
        (point.y, prediction)
      }
      labelAndPreds.filter(r => r._1.toInt == r._2.toInt).count.toDouble / test.count
    }
    result.sum / result.size
  }

  /**
   * Validation with optional pre-processing: balances and/or normalizes the
   * data before delegating to [[validate(trainingData:org\.apache\.spark\.rdd\.RDD,param:String,nFold:Int)*]].
   *
   * @param isBalance   over-sample minority classes first (see [[balance]])
   * @param isNormalize L1-normalize feature values in place (see [[normalize]])
   */
  def validate(trainingData: RDD[DataPoint], param: String, nFold: Int, isBalance: Boolean, isNormalize: Boolean): Double = {
    val trainingData1 = if (isBalance) balance(trainingData) else trainingData
    if (isNormalize) normalize(trainingData1)
    validate(trainingData1, param, nFold)
  }

  /**
   * Trains a liblinear model, optionally balancing and/or normalizing the
   * training data first.
   *
   * @param isBalance   over-sample minority classes first (see [[balance]])
   * @param isNormalize L1-normalize feature values in place (see [[normalize]])
   * @return the trained spark-liblinear model
   */
  def train(trainingData: RDD[DataPoint], param: String, isBalance: Boolean, isNormalize: Boolean): Model = {
    val trainingData1 = if (isBalance) balance(trainingData) else trainingData
    if (isNormalize) normalize(trainingData1)
    SparkLiblinear.train(trainingData1, param)
  }

}
