package cvb

import org.apache.spark.SparkContext._
import com.google.common.base.Preconditions
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{PathFilter, FileStatus, Path, FileSystem}
import org.apache.mahout.math._
import org.apache.spark.{SparkConf, SparkContext}
import org.slf4j.{Logger, LoggerFactory}
import scala.Double
import scala.util.Random
import org.apache.mahout.math.function.Functions
import org.apache.spark.rdd.RDD
import math.{MyDenseMatrix, MySparseRowMatrix, MyDenseVector}
import java.io.IOException


/**
 * `scala.util.Random` specialized to be `Serializable`, so instances can be
 * captured in Spark closures and shipped to executors (plain `Random` is
 * already serializable on the JVM, but this makes the requirement explicit).
 *
 * @param seed deterministic seed forwarded to the underlying PRNG
 */
class MyRandom(seed: Long) extends Random(seed) with Serializable

                                                             
//class MyDenseMatrix(rows: Int, columns: Int) extends DenseMatrix(rows, columns) with Serializable{
//
//}
//
//class MySparseRowMatrix(rows: Int,columns: Int) extends SparseRowMatrix(rows,columns) with Serializable {
//
//}
//
//class MyDenseVector(array:Array[Double]) extends DenseVector(array) with Serializable {
//    def this(size:Int) = {
//      this(new Array[Double](size))
//    }
//}

/**
 * User: biandi
 * Date: 14-3-20
 * Time: 8:12 PM
 */
/**
 * Driver object for Collapsed Variational Bayes (0th-derivative, "CVB0") LDA
 * training on Spark. It resumes from any model checkpoints found under
 * `modelTempPath`, runs training iterations until `maxIterations` is reached
 * or the relative change in perplexity falls below `convergenceDelta`, and
 * finally writes the topic-term model and per-document topic inference.
 *
 * NOTE(review): extending `App` for a non-trivial entry point relies on
 * delayed initialization; a `def main(args: Array[String]): Unit` would be
 * safer and easier to test.
 */
object LdaStart extends App{

  private final val log: Logger = LoggerFactory.getLogger(classOf[LdaStart])

  // Mutable so helper methods and the companion TopicModel builders can reach
  // the context; assigned once below.
  var sc: SparkContext = null

  //  def main(args: Array[String]) = {
  // Local, single-threaded Spark context; paths below are also hard-coded for
  // a local test run (see the commented-out CLI parsing that this replaced).
  val sparkConf = new SparkConf()
  sparkConf.setMaster("local[1]")
  sparkConf.setAppName("Lda")

  sc = new SparkContext(sparkConf)
  val inputPath = new Path("/home/ji/test/input")
  val outputPath = new Path("/home/ji/test/output")

  /*val iterator = args.toIterator
  val numTopics = Integer.parseInt(iterator.next())
  val maxIterations = Integer.parseInt(iterator.next())
  val iterationBlockSize = Integer.parseInt(iterator.next())
  val convergenceDelta = java.lang.Double.parseDouble(iterator.next())
  val alpha = java.lang.Double.parseDouble(iterator.next())
  val eta = java.lang.Double.parseDouble(iterator.next())
  val numTrainThreads = Integer.parseInt(iterator.next())
  val numUpdateThreads = Integer.parseInt(iterator.next())
  val maxItersPerDoc = Integer.parseInt(iterator.next())


  //optional parameters
  val dictionaryPath = iterator.next()
  val numTerms = Integer.parseInt(iterator.next())
  val docTopicOutputPath = new Path(iterator.next())
  val modelTempPath = new Path(iterator.next())
  val seed = java.lang.Long.parseLong(iterator.next())
  val testFraction = java.lang.Float.parseFloat(iterator.next())
  val numReduceTasks = Integer.parseInt(iterator.next())
  //replaced by num-workers
  val backfillPerplexity = java.lang.Boolean.parseBoolean(iterator.next())*/


  //fixme read from variables

  // Hard-coded hyper-parameters (should come from args, see FIXME above):
  // alpha = document/topic prior, eta = topic/term prior.
  val numTopics = 5
  val maxIterations = 20
  val iterationBlockSize = 3
  val convergenceDelta = 1.0
  val alpha = 0.1
  val eta = 0.1
  val maxItersPerDoc = 10


  // Optional parameters
  //    val dictionaryPath = iterator.next()
  val numTerms = 26
  val docTopicOutputPath = new Path("/home/ji/test/output/docTopic")
  val modelTempPath = new Path("/home/ji/test/output/modelTemp")
  val seed = 10
  val testFraction = 0.5f
  // replaced by num-workers
  val backfillPerplexity = true


  val modelWeight = 1.0



  Preconditions.checkArgument(testFraction >= 0.0 && testFraction <= 1.0, "Expected 'testFraction' value in range [0, 1] but found value '%s'", String.valueOf(testFraction))
  Preconditions.checkArgument(!backfillPerplexity || testFraction > 0.0, "Expected 'testFraction' value in range (0, 1] but found value '%s'", String.valueOf(testFraction))
  val infoString: String = "Will run Collapsed Variational Bayes (0th-derivative approximation) " + "learning for LDA (numTerms: {}), finding {}-topics, with document/topic prior {}, " + "topic/term prior {}.  Maximum iterations to run will be {}, unless the change in " + "perplexity is less than {}.  Topic model output (p(term|topic) for each topic) will be " + "stored {}.  MyRandom initialization seed is {}, holding out {} of the data for perplexity " + "check\n"
  // NOTE(review): passing a single Array to SLF4J's varargs here may bind it
  // as one Object argument instead of expanding (Scala needs `: _*`) — verify.
  log.info(infoString, Array(String.valueOf(numTerms), String.valueOf(numTopics), String.valueOf(alpha), String.valueOf(eta), String.valueOf(maxIterations), String.valueOf(convergenceDelta), String.valueOf(docTopicOutputPath), String.valueOf(seed), String.valueOf(testFraction)))

  val conf = new Configuration()

  // Resume support: count the model-N checkpoints already on disk.
  var iterationNumber = getCurrentIterationNumber(conf, modelTempPath, maxIterations)
  log.info("Current iteration number: {}", iterationNumber)
  // Perplexity history; entries are PREPENDED (::=) during the loops below.
  var perplexities: List[Double] = List.empty
  // Backfill: recompute any missing perplexity values for checkpoints that
  // fall on an iteration-block boundary.
  for (i <- 1 to iterationNumber if readPerplexity(modelTempPath, conf, i).isNaN && backfillPerplexity && i % iterationBlockSize == 0) {
    val modelPath = getModelPath(modelTempPath, i)
    log.info("Backfilling perplexity at iteration {}", i)
    val perplexity = calculatePerplexity(modelPath, inputPath, testFraction, maxItersPerDoc, conf, i, numTopics, numTerms, eta, alpha, seed,  modelWeight)
    perplexities ::= perplexity
    log.info("Perplexity at iteration {} = {}", i, perplexity)
  }
  perplexities = perplexities.reverse

  var rdd: RDD[(Int, Array[Double])] = null
  val startTime = System.currentTimeMillis
  // Main training loop: run until maxIterations or until the relative change
  // in perplexity drops below convergenceDelta.
  // FIXME(review): the guard `convergenceDelta > 0.0` means a non-positive
  // delta disables training entirely rather than disabling the convergence
  // check, which is probably not the intent.
  for {i <- iterationNumber until (maxIterations)
       if convergenceDelta > 0.0
       delta = rateOfChange(perplexities)
       if (delta >= convergenceDelta)
  } {
    iterationNumber += 1
    log.info("About to run iteration {} of {}", iterationNumber, maxIterations)

    val modelOutputPath = getModelPath(modelTempPath, iterationNumber)

    rdd = runIteraion(conf, inputPath, maxIterations, rdd, testFraction, maxItersPerDoc, i, numTopics, numTerms, eta, alpha, seed, modelWeight)

    // Checkpoint the new model as text: "topicId weight,weight,..."
    rdd.map(kv=>{
      kv._1+" "+kv._2.mkString(",")
    }).saveAsTextFile(modelOutputPath.toUri.toString)

    if (testFraction > 0 && iterationNumber % iterationBlockSize == 0) {
      perplexities ::= calculatePerplexity(modelOutputPath, inputPath, testFraction, maxItersPerDoc, conf, i, numTopics, numTerms, eta, alpha, seed, modelWeight)
      // FIXME(review): entries are prepended, so perplexities(size - 1) is the
      // OLDEST value; the "current" perplexity is perplexities.head.
      log.info("Current perplexity = {}", perplexities(perplexities.size - 1))
      log.info("(p_{} - p_{}) / p_0 = {}; target = {}", iterationNumber.toString, (iterationNumber - iterationBlockSize).toString, rateOfChange(perplexities).toString, convergenceDelta.toString)
    }
  }

  perplexities = perplexities.reverse
  log.info("Completed {} iterations in {} seconds", iterationNumber, (System.currentTimeMillis - startTime) / 1000)
  // FIXME(review): `Arrays` (java.util.Arrays) is not imported — this line
  // will not compile as written.
  log.info("Perplexities: ({})", Arrays.toString(perplexities.toArray))

  // write final topic-term and doc-topic distributions
  // NOTE(review): both guards test docTopicOutputPath; the first one likely
  // should test the model/output path instead.
  if (docTopicOutputPath != null) {
    writeTopicModel(rdd, outputPath);
  }
  if (docTopicOutputPath != null) {
    writeDocTopicInference(conf, inputPath, docTopicOutputPath, rdd, eta, alpha, modelWeight, maxIterations)
  }

  /**
   * Runs p(topic|doc) inference for every document in `corpus` against the
   * final model and writes "docId topicWeight,topicWeight,..." lines to
   * `output`.
   */
  def writeDocTopicInference(conf: Configuration, corpus: Path, output: Path, rdd:RDD[(Int, Array[Double])], eta: Double, alpha: Double, modelWeight: Double, maxIters:Int) = {
    val jobName: String = String.format("Writing final document/topic inference from %s to %s", corpus, output)
    log.info("About to run: {}", jobName)

    // Broadcast the (read-only) trained model to all executors.
    val readModel = new TopicModel(conf, eta, alpha, null, modelWeight, rdd)
    val bdReadModel = sc.broadcast(readModel)

    sc.textFile(corpus.toUri.toString).map(line => {
      // Start from a uniform topic distribution and refine it maxIters times.
      val docTopics: MyDenseVector = new MyDenseVector(numTopics).assign(1.0 / numTopics).asInstanceOf[MyDenseVector]
      val (key, seq) = analysisMatrixFromSparse(line)
      val array = new Array[Double](numTerms)
      seq.foreach(kv=>{
        array.update(kv._1, kv._2)
      })
      val document = new MyDenseVector(array)

      val docModel: MySparseRowMatrix = new MySparseRowMatrix(numTopics, document.size)
      (0 until maxIters).foreach(x => bdReadModel.value.trainDocTopicModel(document, docTopics, docModel))
      key+" "+docTopics.getValues.mkString(",")
    }).saveAsTextFile(output.toUri.toString)

  }

  /**
   * Writes the topic-term model as text ("topicId w,w,...").
   * NOTE(review): the L1-normalized `vector` is discarded and the raw kv._2
   * array is emitted — whether the assign mutates kv._2 depends on
   * MyDenseVector's copy semantics; TODO confirm the normalization takes
   * effect.
   */
  def writeTopicModel(rdd: RDD[(Int, Array[Double])], output: Path) = {
    rdd.map(kv => {
      val vector = new MyDenseVector(kv._2)
      vector.assign(Functions.div(vector.norm(1.0)))
      kv._1 + " " + kv._2.mkString(",")
    }).saveAsTextFile(output.toUri.toString)
  }

  /**
   * One CVB0 training pass: reads (or reuses) the corpus RDD, trains the
   * per-document topic models against the current `readModel`, accumulates
   * topic-term counts into `writeModel`, and returns the new topic-term
   * counts keyed by topic id.
   *
   * NOTE(review): the per-document inference loop below uses `maxIterations`
   * while the `maxItersPerDoc` parameter is never used — likely a bug.
   * NOTE(review): `bdWriteModel.value` is mutated inside flatMap; Spark
   * broadcast values are per-executor copies, so this only accumulates
   * globally in local mode — verify intent before running distributed.
   * (Method name "runIteraion" is a typo for "runIteration".)
   */
  def runIteraion(conf: Configuration, input: Path, maxIterations:Int, lastRdd: RDD[(Int, Array[Double])],testFraction: Float, maxItersPerDoc: Int,  iteration: Int,
                  numTopics: Int, numTerms: Int, eta: Double, alpha: Double, seed: Long, modelWeight: Double): RDD[(Int, Array[Double])] = {
    // First iteration: parse the corpus into dense term-count arrays.
    val rdd: RDD[(Int, Array[Double])] = if (lastRdd == null) {
      sc.textFile(input.toUri.toString).map(line => {
        val (key, seq) = analysisMatrixFromSparse(line)
        val array = new Array[Double](numTerms)
        seq.foreach(kv=>{
          array.update(kv._1, kv._2)
        })
        (key.toInt, array)
      })
    } else {
      lastRdd
    }


    // Read model: previous iteration's counts, or a random initialization on
    // the very first pass.
    val readModel =
      if (lastRdd != null) {
        new TopicModel(conf, eta, alpha, null, modelWeight, rdd)
      } else {
        log.info("No model files found")
        new TopicModel(numTopics, numTerms, eta, alpha, new MyRandom(seed), null, modelWeight)
      }

    // Write model: a fresh empty model, unless modelWeight != 1 (then counts
    // are accumulated into the read model itself).
    val writeModel: TopicModel = if (modelWeight == 1.0) new TopicModel(numTopics, numTerms, eta, alpha, null, modelWeight) else readModel



    val bdReadModel = sc.broadcast(readModel)
    val bdWriteModel = sc.broadcast(writeModel)


    val newRdd = rdd.flatMap(kv => {
      val document = new MyDenseVector(kv._2)
      // Uniform initial p(topic|doc), refined by repeated CVB0 passes.
      val topicVector = new MyDenseVector(numTopics).assign(1.0 / numTopics).asInstanceOf[MyDenseVector]

      val docTopicModelMatrix = new MySparseRowMatrix(numTopics, numTerms)
      (0 until maxIterations).foreach(x => bdReadModel.value.trainDocTopicModel(document, topicVector, docTopicModelMatrix))

      // Emit one (topic, termDistribution) pair per topic for this document.
      (0 until docTopicModelMatrix.rowSize()).map(x => (x, docTopicModelMatrix.viewRow(x)))
    }).flatMap(kv => {
      // Fold this document's contribution into the (executor-local) write model.
      bdWriteModel.value.topicTermCounts.viewRow(kv._1).assign(kv._2, Functions.PLUS)

      bdWriteModel.value.topicSums.set(kv._1, bdWriteModel.value.topicSums.get(kv._1) + kv._2.norm(1))

      // Re-emit the full accumulated model so reduceByKey can merge copies.
      var list: List[(Int, Array[Double])] = List.empty

      val ite = bdWriteModel.value.topicTermCounts.iterateAll()
      while (ite.hasNext) {
        val topic = ite.next()
        list ::=(topic.index(), topic.vector().asInstanceOf[MyDenseVector].getValues)
      }

      list.reverse
    }).reduceByKey((a, b) => {
      // Element-wise sum of the per-topic term-count rows.
      new MyDenseVector(a).assign(new MyDenseVector(b), Functions.PLUS).asInstanceOf[MyDenseVector].getValues
    })

    newRdd
  }


  /**
   * Relative change between the two most recent perplexity values, scaled by
   * the first entry. Returns Double.MaxValue until two values exist (so the
   * convergence check never fires early).
   * NOTE(review): this indexes the END of the list as "most recent", but the
   * callers PREPEND new values (::=) — verify the intended ordering.
   */
  def rateOfChange(perplexities: List[Double]): Double = {
    val sz = perplexities.size
    if (sz < 2) {
      Double.MaxValue
    } else {
      Math.abs(perplexities(sz - 1) - perplexities(sz - 2)) / perplexities(0)
    }
  }

  /**
   * Computes held-out perplexity of the model at `modelPath` on a
   * `testFraction` sample of the corpus and writes per-document
   * (docNorm, perplexity) pairs under the perplexity-<iteration> path,
   * then reads them back aggregated via readPerplexity.
   */
  def calculatePerplexity(modelPath: Path, inputPath: Path, testFraction: Float, maxItersPerDoc: Int, conf: Configuration, iteration: Int,
                          numTopics: Int, numTerms: Int, eta: Double, alpha: Double, seed: Long, modelWeight: Double): Double = {
    val outputPath: Path = getPerplexityPath(modelPath.getParent, iteration)
    val statuses: Array[FileStatus] = FileSystem.get(modelPath.toUri, conf).listStatus(modelPath, PathFilters.partFilter)
    val modelPaths: Seq[Path] = statuses.map(status => status.getPath)
    val readModel = if (modelPaths.isEmpty) {
      log.info("No model files found")
      new TopicModel(numTopics, numTerms, eta, alpha, new MyRandom(seed), null, modelWeight)
    } else {
      new TopicModel(eta, alpha, null, modelWeight, modelPaths)
    }

    val bdModel = sc.broadcast(readModel)

    log.info("Initializing topic vector")
    val topicVector = new MyDenseVector(numTopics)
    val bdTopicVector = sc.broadcast(topicVector)
    // NOTE(review): this val shadows the MyRandom class name — rename it.
    val MyRandom = new MyRandom(seed)

    // Input line format: docId \t termId,termId...
    // Sample roughly testFraction of the documents for the held-out set.
    sc.textFile(inputPath.toUri.toString).filter(line => {
      !(testFraction < 1.0f && MyRandom.nextFloat >= testFraction)
    }).map(
      line => {
        val (_, seq) = analysisMatrixFromSparse(line)
        val array = new Array[Double](numTerms)
        seq.foreach(kv=>{
          array.update(kv._1, kv._2)
        })
        val document = new MyDenseVector(array)
        // Key = document L1 norm (model weight), value = document perplexity.
        // NOTE(review): reduceByKey merges documents that happen to share the
        // same norm, and readPerplexity reads column 0 as perplexity and
        // column 1 as weight — the pair order here looks inverted; confirm.
        val kv = (document.norm(1), TopicModel.calculatePerplexity(document, bdTopicVector.value.assign(1.0 / numTopics).asInstanceOf[MyDenseVector], maxItersPerDoc, numTopics, numTerms, bdModel.value))
        kv
      }).reduceByKey((a, b) => {
      a + b
    }).saveAsTextFile(outputPath.toUri.toString)

    readPerplexity(modelPath.getParent, conf, iteration)
  }

  /**
   * Parses a sparse document line "docId termId:count,termId:count,..." into
   * (docId, Seq[(termId, count)]).
   * NOTE(review): the println below is leftover debug output.
   */
  def analysisMatrixFromSparse(line: String): (String, Seq[(Int, Double)]) = {
    val array = line.split(" ")
    val key = array(0)
    println(array(1))
    val values = array(1).split(",")
    val sparseVector = values.map(e=> {
      val kv = e.split(":")
      (kv(0).toInt, kv(1).toDouble)
    })
    (key, sparseVector)
  }

  /**
   * Parses a dense model line "rowId v,v,v,..." into (rowId, raw value
   * strings); callers convert the strings to Double themselves.
   */
  def analysisMatrixFromDense(line: String): (String, Array[String]) = {
    val array = line.split(" ")
    val key = array(0)
    val values = array(1).split(",")
    (key, values)
  }

  /** Checkpoint directory for the model produced by `iterationNumber`. */
  def getModelPath(topicModelStateTempPath: Path, iterationNumber: Int): Path = {
    new Path(topicModelStateTempPath, "model-" + iterationNumber)
  }

  /**
   * Scans modelTempDir for existing model-N checkpoints and returns the
   * number of the last completed iteration (0 if none exist).
   */
  def getCurrentIterationNumber(config: Configuration, modelTempDir: Path, maxIterations: Int) = {
    val fs: FileSystem = FileSystem.get(modelTempDir.toUri, config)
    var iterationNumber: Int = 1
    var iterationPath: Path = getModelPath(modelTempDir, iterationNumber)

    while (fs.exists(iterationPath) && iterationNumber <= maxIterations) {
      log.info("Found previous state: {}", iterationPath)
      iterationNumber += 1
      iterationPath = getModelPath(modelTempDir, iterationNumber)
    }
    iterationNumber - 1
  }

  /** Output directory for the perplexity computed at `iterationNumber`. */
  def getPerplexityPath(topicModelStateTempPath: Path, iterationNumber: Int): Path = {
    new Path(topicModelStateTempPath, "perplexity-" + iterationNumber)
  }

  /**
   * Reads back the per-document perplexity pairs written by
   * calculatePerplexity and returns their aggregate ratio; NaN when the
   * perplexity directory for `iteration` does not exist.
   */
  def readPerplexity(topicModelStateTemp: Path, conf: Configuration, iteration: Int): Double = {
    val perplexityPath = getPerplexityPath(topicModelStateTemp, iteration)
    val fs: FileSystem = FileSystem.get(conf)
    // NOTE(review): this listStatus(...).toString is dead code.
    fs.listStatus(topicModelStateTemp).toString
    if (!fs.exists(perplexityPath)) {
      log.warn("Perplexity path {} does not exist, returning NaN", perplexityPath)
      Double.NaN
    } else {
      // One key-value pair is stored per line, as the tuple's "(a,b)" string.
      // NOTE(review): `n` is incremented inside an RDD reduce closure, so the
      // driver-side counter does not reflect executor-side increments; the
      // logged entry count is unreliable.
      var n = 0;
      val (perplexity, modelWeight) = sc.textFile(perplexityPath.toUri.toString).map(line => {
        // Strip the surrounding parentheses of the tuple's toString.
        val array = line.substring(1, line.length - 1).split(",")
        (array(0).toDouble, array(1).toDouble)
      }).reduce((a, b) => {
        n += 1
        (a._1 + b._1, a._2 + b._2)
      })
      log.info("Read {} entries with total perplexity {} and model weight {}", Array(String.valueOf(n), String.valueOf(perplexity), String.valueOf(modelWeight)))
      perplexity / modelWeight
    }
  }
}

/**
 * CVB0 LDA topic model: a (topicTermCounts, topicSums) pair plus the
 * smoothing priors. Row x of `topicTermCounts` holds the (unnormalized)
 * term counts for topic x; `topicSums(x)` is that row's total mass.
 *
 * @param matrixAndVector (topic-term count matrix, per-topic sum vector)
 * @param eta             topic/term smoothing prior
 * @param alpha           document/topic smoothing prior
 * @param dictionary      optional term dictionary (unused here; may be null)
 * @param modelWeight     scale factor applied to all counts when != 1
 */
class TopicModel(matrixAndVector: (MyDenseMatrix, MyDenseVector), eta: Double, alpha: Double, dictionary: List[String], modelWeight: Double) extends Serializable {

  val log = LoggerFactory.getLogger(TopicModel.getClass)

  val topicSums = matrixAndVector._2
  val numTopics = topicSums.size()
  val topicTermCounts = matrixAndVector._1
  val numTerms = topicTermCounts.numCols

  // Rescale counts in place by modelWeight. NOTE(review): this mutates the
  // matrix/vector the caller passed in.
  if (modelWeight != 1) {
    topicSums.assign(Functions.mult(modelWeight))
    for (i <- 0 until numTopics) topicTermCounts.viewRow(i).assign(Functions.mult(modelWeight))
  }

  /** Loads a model from previously checkpointed model files. */
  def this(eta: Double, alpha: Double, dictionary: List[String], modelWeight: Double, modelPaths: Seq[Path]) {
    this(TopicModel.createMatrixAndVector(modelPaths), eta, alpha, dictionary, modelWeight)
  }

  /**
   * Creates a fresh model; randomly initialized when `MyRandom` is non-null,
   * empty (all zeros, unit sums) otherwise.
   */
  def this(numTopics: Int, numTerms: Int, eta: Double, alpha: Double, MyRandom: MyRandom, dictionary: List[String], modelWeight: Double) {
    this(TopicModel.createMatrixAndVector(numTopics, numTerms, MyRandom), eta, alpha, dictionary, modelWeight)
  }

  /** Builds a model from an in-memory RDD of (topicId, termCounts) rows. */
  def this(conf: Configuration, eta: Double, alpha: Double, dictionary: List[String], modelWeight: Double, rdd: RDD[(Int, Array[Double])]) {
    this(TopicModel.createMatrixAndVector(rdd), eta, alpha, dictionary, modelWeight)
  }


  // Same as the Seq[Path] constructor above; conf is accepted but unused.
  def this(conf:Configuration, eta: Double, alpha: Double, dictionary: List[String], modelWeight: Double, modelPahts: Seq[Path]) {
    this(TopicModel.createMatrixAndVector(modelPahts), eta, alpha, dictionary, modelWeight)
  }

  /** Convenience overload: fresh empty model (no random initialization). */
  def this(numTopics: Int, numTerms: Int, eta: Double, alpha: Double, dictionary: List[String], modelWeight: Double) {
    this(numTopics, numTerms, eta, alpha, null, dictionary, modelWeight)
  }


  override def toString: String = topicTermCounts.toString + topicSums.toString

  /**
   * \(sum_x sum_a (c_ai * log(p(x|i) * p(a|x)))\)
   */
  def perplexity(document: Vector, docTopics: Vector): Double = {
    var perplexity: Double = 0
    // Normalizer for the smoothed p(topic|doc) distribution.
    val norm: Double = docTopics.norm(1) + (docTopics.size * alpha)
    val it = document.nonZeroes().iterator()
    while (it.hasNext) {
      val e = it.next()
      val term: Int = e.index()
      var prob: Double = 0
      // Marginalize p(term) over all topics: p(topic|doc) * p(term|topic).
      for (x <- 0 until numTopics) {
        val d: Double = (docTopics.get(x) + alpha) / norm
        val p: Double = d * (matrixAndVector._1.viewRow(x).get(term) + eta) / (matrixAndVector._2.get(x) + eta * numTerms)
        prob += p
      }
      perplexity += e.get * Math.log(prob)
    }
    -perplexity
  }

  /**
   * One CVB0 update for a single document: recomputes the term-topic
   * distribution `docTopicModel` and rewrites `topics` = p(topic|doc) in
   * place. Callers invoke this repeatedly to converge the document.
   */
  def trainDocTopicModel(original: Vector, topics: Vector, docTopicModel: MySparseRowMatrix) = {
    // first calculate p(topic|term,document) for all terms in original, and all topics,
    // using p(term|topic) and p(topic|doc)
    pTopicGivenTerm(original, topics, docTopicModel)
    normalizeByTopic(docTopicModel)
    // now multiply, term-by-term, by the document, to get the weighted distribution of
    // term-topic pairs from this document.
    val ite = original.nonZeroes().iterator()
    while (ite.hasNext) {
      val e = ite.next()
      (0 until numTopics).foreach(x => {
        val docTopicModelRow: Vector = docTopicModel.viewRow(x)
        docTopicModelRow.setQuick(e.index, docTopicModelRow.getQuick(e.index) * e.get)
      })
    }
    // now recalculate \(p(topic|doc)\) by summing contributions from all of pTopicGivenTerm
    topics.assign(0.0)
    (0 until numTopics).foreach(x => topics.set(x, docTopicModel.viewRow(x).norm(1)))
    // now renormalize so that \(sum_x(p(x|doc))\) = 1
    topics.assign(Functions.mult(1 / topics.norm(1)))
  }

  /**
   * Fills `termTopicDist` with the unnormalized smoothed likelihood
   * p(topic|term, doc) for every nonzero term of `document` and every topic.
   * When `docTopics` is null a uniform topic weight of 1.0 is assumed.
   */
  private def pTopicGivenTerm(document: Vector, docTopics: Vector, termTopicDist: MySparseRowMatrix) = {
    (0 until numTopics).foreach(x => {
      val topicWeight: Double = if (docTopics == null) 1.0 else docTopics.get(x)
      val topicTermRow: Vector = matrixAndVector._1.viewRow(x)
      val topicSum: Double = matrixAndVector._2.get(x)
      val termTopicRow: Vector = termTopicDist.viewRow(x)
      val ite = document.nonZeroes().iterator()
      while (ite.hasNext) {
        val e = ite.next()
        val termTopicLikelihood: Double = (topicTermRow.get(e.index()) + eta) * (topicWeight + alpha) / (topicSum + eta * numTerms)
        termTopicRow.set(e.index(), termTopicLikelihood)
      }
    })
  }

  /**
   * Normalizes each term's column across topics so that the per-term topic
   * distribution sums to 1. Iterates only row 0's nonzeroes — this relies on
   * the invariant that pTopicGivenTerm sets the same term indices in every
   * row, so all rows share row 0's sparsity pattern.
   */
  private def normalizeByTopic(perTopicSparseDistributions: MySparseRowMatrix) {
    val ite = perTopicSparseDistributions.viewRow(0).nonZeroes().iterator()
    while (ite.hasNext) {
      val e = ite.next()
      val a: Int = e.index
      var sum: Double = 0
      (0 until numTopics).foreach(x => sum += perTopicSparseDistributions.viewRow(x).get(a))
      (0 until numTopics).foreach(x => perTopicSparseDistributions.viewRow(x).set(a, perTopicSparseDistributions.viewRow(x).get(a) / sum))
    }
  }


}

/** Companion: perplexity helper and (matrix, sums) factory methods. */
object TopicModel {

  /**
   * Trains the document against `readModel` for `numDocTopicIters` CVB0
   * passes, then returns its perplexity. `docTopicCounts` is refined in place.
   */
  def calculatePerplexity(document: MyDenseVector, docTopicCounts: MyDenseVector, numDocTopicIters: Int, numTopics: Int, numTerms: Int, readModel: TopicModel): Double = {
    val docTopicModelMatrix = new MySparseRowMatrix(numTopics, numTerms)
    for (i <- 0 until numDocTopicIters) {
      readModel.trainDocTopicModel(document, docTopicCounts, docTopicModelMatrix)
    }
    readModel.perplexity(document, docTopicCounts)
  }

  /**
   * Collects an RDD of (topicId, termCounts) into a dense (matrix, rowSums)
   * pair on the driver. numTopics is inferred as max topic id + 1; rows
   * missing from the RDD stay zero.
   */
  def createMatrixAndVector(rdd: RDD[(Int, Array[Double])]): (MyDenseMatrix, MyDenseVector) = {
    // Single reduce collects all rows, the max topic index, and the row width.
    val (rowList, index, numTerms) = rdd.map(kv => {
      (List((kv._1, kv._2)), kv._1, kv._2.size)
    }).reduce((a, b) => {
      (a._1 ::: b._1, Math.max(a._2, b._2), a._3)
    })

    val numTopics = index + 1
    val model = new MyDenseMatrix(numTopics, numTerms)
    val topicSums = new MyDenseVector(numTopics)
    val rows = rowList.reverse
    for ((k, v) <- rows) {
      model.viewRow(k).assign(v)
      // NOTE(review): the Seq[Path] overload below uses MyDenseVector here;
      // this one uses plain DenseVector — align for consistency.
      topicSums.set(k, new DenseVector(v).norm(1))
    }
    (model, topicSums)
  }


  /**
   * Loads a (matrix, rowSums) pair from checkpointed text model files
   * ("rowId v,v,..." lines). Throws IOException when no rows are found.
   * NOTE(review): numTopicsIndex/numTermsIndex keep only the LAST path's
   * values — correct only if all paths agree; verify.
   */
  def createMatrixAndVector(modelPaths: Seq[Path]): (MyDenseMatrix, MyDenseVector) = {
    var rows: List[(Int, Array[Double])] = List.empty
    var numTopicsIndex = -1
    var numTermsIndex = -1
    modelPaths.foreach(modelPath => {
      val (rowList, index, numTerms) = LdaStart.sc.textFile(modelPath.toUri.toString).map(line => {
        val (key, values) = LdaStart.analysisMatrixFromDense(line);
        val array = new Array[Double](values.length);
        values.map(str=>str.toDouble).copyToArray(array)
        (List((key.toInt, array)), key.toInt, array.size)
      }).reduce((a, b) => {
        (a._1 ::: b._1, Math.max(a._2, b._2), a._3)
      })
      rows :::= rowList
      numTopicsIndex = index
      numTermsIndex = numTerms
    })

    if (rows.isEmpty) {
      throw new IOException(modelPaths + " have no vectors in it")
    }
    val numTopics = numTopicsIndex + 1
    val model: MyDenseMatrix = new MyDenseMatrix(numTopics, numTermsIndex)
    val topicSums = new MyDenseVector(numTopics)
    for ((k, v) <- rows) {
      model.viewRow(k).assign(v)
      topicSums.set(k, new MyDenseVector(v).norm(1))
    }
    (model, topicSums)
  }

  /**
   * Fresh (matrix, rowSums) pair: random uniform counts when `MyRandom` is
   * non-null (sums = row L1 norms), all-zero counts with unit sums otherwise.
   */
  def createMatrixAndVector(numTopics: Int, numTerms: Int, MyRandom: MyRandom): (MyDenseMatrix, MyDenseVector) = {
    val topicTermCounts: MyDenseMatrix = new MyDenseMatrix(numTopics, numTerms)
    val topicSums = new MyDenseVector(numTopics)
    if (MyRandom != null) {
      for (x <- 0 until numTopics; term <- 0 until numTerms) {
        topicTermCounts.viewRow(x).set(term, MyRandom.nextDouble)
      }
    }
    for (x <- 0 until numTopics) {
      // Unit sums for the zero model avoid division by zero downstream.
      topicSums.set(x, if (MyRandom == null) 1.0 else topicTermCounts.viewRow(x).norm(1))
    }
    (topicTermCounts, topicSums)
  }
}

/**
 * Reusable Hadoop [[PathFilter]] singletons for selecting job output files.
 */
object PathFilters {

  /** Accepts "part-*" output files while skipping ".crc" checksum files. */
  private final val PartFileFilter: PathFilter = new PathFilter {
    override def accept(path: Path): Boolean = {
      val fileName = path.getName
      fileName.startsWith("part-") && !fileName.endsWith(".crc")
    }
  }

  /** Accepts only the final clustering output ("clusters-*-final"). */
  private final val FinalClusterFilter: PathFilter = new PathFilter {
    override def accept(path: Path): Boolean = {
      val fileName = path.getName
      fileName.startsWith("clusters-") && fileName.endsWith("-final")
    }
  }

  /** Rejects checksum files, hidden files, and Hadoop bookkeeping files. */
  private final val LogsCrcFilter: PathFilter = new PathFilter {
    override def accept(path: Path): Boolean = {
      val fileName = path.getName
      !fileName.endsWith(".crc") && !fileName.startsWith(".") && !fileName.startsWith("_")
    }
  }

  /**
   * @return { @link PathFilter} that accepts paths whose file name starts with "part-". Excludes
   *         ".crc" files.
   */
  def partFilter: PathFilter = PartFileFilter

  /**
   * @return { @link PathFilter} that accepts paths whose file name starts with "part-" and ends with "-final".
   */
  def finalPartFilter: PathFilter = FinalClusterFilter

  /**
   * @return { @link PathFilter} that rejects paths whose file name starts with "_" (e.g. Cloudera
   *         _SUCCESS files or Hadoop _logs), or "." (e.g. local hidden files), or ends with ".crc"
   */
  def logsCRCFilter: PathFilter = LogsCrcFilter
}

// Empty placeholder class: it exists only so that `classOf[LdaStart]`
// resolves for the SLF4J logger created in the LdaStart object above
// (classOf[] cannot reference an object's type directly).
class LdaStart {

}


