
import ArgsParseUtil
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.mutable.ListBuffer

class spark_common {
  /**
   * Entry point: loads a text export of RocksDB data with Spark, then runs a
   * range query against a RocksDB database on each partition and saves the
   * matching `key:value` pairs as text.
   *
   * Recognized arguments (parsed via ArgsParseUtil):
   *   local     - run Spark in local mode when "true" (default "true")
   *   input     - path of the exported data file (default "/path/to/rocksdb/data")
   *   output    - output path for the query results (default "/path/to/output")
   *   db_path   - path of the RocksDB database opened on each executor
   *               (default "/path/to/rocksdb")
   *   start_key - inclusive lower bound of the range query (default "")
   *   end_key   - inclusive upper bound of the range query (default "")
   */
  def main(args: Array[String]): Unit = {
    import org.rocksdb.{Options, RocksDB}

    val (argsList, argsMap) = ArgsParseUtil.parseArgs(args)
    val local = argsMap.getOrElse("local", "true").toBoolean // whether to run in local mode
    val input = argsMap.getOrElse("input", "/path/to/rocksdb/data")
    val output = argsMap.getOrElse("output", "/path/to/output")
    val dbPath = argsMap.getOrElse("db_path", "/path/to/rocksdb")
    val startKey = argsMap.getOrElse("start_key", "")
    val endKey = argsMap.getOrElse("end_key", "")

    // 1. Configure Spark. A single `conf` value — the original file declared
    // `conf` twice in the same scope, which does not compile.
    val conf =
      if (local)
        new SparkConf().setAppName("RocksDBRangeQuery").setMaster("local[1]")
      else
        new SparkConf().setAppName("RocksDBRangeQuery")
    val sc = new SparkContext(conf)

    try {
      // 2. Load the exported data into an RDD.
      // NOTE: TextInputFormat produces LongWritable keys (byte offsets) and
      // Text values — the key class must be LongWritable, not Text.
      val rocksdbRDD = sc.newAPIHadoopFile(
        input,
        classOf[org.apache.hadoop.mapreduce.lib.input.TextInputFormat],
        classOf[org.apache.hadoop.io.LongWritable],
        classOf[org.apache.hadoop.io.Text]
      ).map(_._2.toString)

      // 3. Run the range query on each partition. RocksDB handles are native
      // objects and are NOT serializable, so both Options and the DB must be
      // created inside the executor-side closure, never on the driver.
      val results = rocksdbRDD.mapPartitions { _ =>
        RocksDB.loadLibrary()
        val options = new Options().setCreateIfMissing(false)
        val db = RocksDB.open(options, dbPath)
        val iter = db.newIterator()
        try {
          // Position the iterator at the first key >= startKey.
          iter.seek(startKey.getBytes("UTF-8"))
          val buf = new ListBuffer[String]()
          // Collect every record whose key lies within [startKey, endKey].
          // iter.key() returns Array[Byte] (no compareTo), so compare the
          // UTF-8 decoded strings instead.
          while (iter.isValid && new String(iter.key(), "UTF-8") <= endKey) {
            val key = new String(iter.key(), "UTF-8")
            val value = new String(iter.value(), "UTF-8")
            buf += s"$key:$value"
            iter.next()
          }
          buf.iterator // buf is fully materialized, so closing below is safe
        } finally {
          // Always release native resources, even if the scan fails.
          iter.close()
          db.close()
          options.close()
        }
      }
      results.saveAsTextFile(output)
    } finally {
      sc.stop() // release the SparkContext in all cases
    }
  }
}
