package com.qing.search

import java.io.File

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{Partition, SparkContext}
import org.apache.spark.rdd.RDD

import scala.collection.mutable.ListBuffer
import scala.reflect.ClassTag

/**
  * Created by wuliao on 2017/8/17.
  */
private[search] abstract class AbstractLuceneRDD[T: ClassTag](@transient sc: SparkContext,
                                                              val params: java.util.Map[String, Array[String]])
  extends RDD[T](sc, Nil) {

  /**
    * Builds one Spark partition per Lucene index path from [[esPartitions]].
    * The partition's index is the element's position in the path list.
    */
  override def getPartitions: Array[Partition] = {
    esPartitions.zipWithIndex.map { case (path, idx) =>
      new LucenePartition(id, idx, path)
    }.toArray
  }

  /**
    * Lazily built list of Lucene index locations, one entry per partition.
    *
    * NOTE(review): every entry is the identical string "/mnt/disk/lucene/&10".
    * The original loop variable was never used, so all 30 partitions point at
    * the same path — confirm whether the per-partition index (or a shard id)
    * was meant to be appended instead of the literal 10.
    */
  @transient private[search] lazy val esPartitions = {
    // TODO: both the base path and the node count are hard-coded even though a
    // `params` map is available; consider sourcing them from `params`.
    val path = "/mnt/disk/lucene/"
    val nodeNum = 10 * 3
    // Single-expression construction instead of a mutable append loop; keeps
    // the original ListBuffer[String] result type for package-internal users.
    ListBuffer.fill(nodeNum)(path + "&" + 10)
  }

}
