package com.qing.write

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.log4j.{Level, Logger}
import org.apache.lucene.analysis.standard.StandardAnalyzer
import org.apache.lucene.document.{Document, Field, TextField}
import org.apache.lucene.index.{IndexWriter, IndexWriterConfig}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ListBuffer

/**
  * Spark driver that reads a text file (HDFS by default), re-keys every line by
  * its source partition, redistributes the data with [[LucenePartitioner]] into
  * 10 partitions, and — in the currently commented-out section — would build one
  * Lucene index per partition under hdfs://127.0.0.1:9000/data/index/&lt;partition&gt;.
  *
  * Usage: TestIndexFile [inputPath]
  *   inputPath defaults to hdfs://127.0.0.1:9000/data/data when absent or empty.
  */
object TestIndexFile {
  def main(args: Array[String]): Unit = {
    Logger.getRootLogger.setLevel(Level.OFF)

    // First non-empty CLI argument wins; otherwise fall back to the HDFS default.
    val inputPath = args.headOption
      .filter(_.nonEmpty)
      .getOrElse("hdfs://127.0.0.1:9000/data/data")
    //    alternative local inputs used during development:
    //    "file:///Users/wuliao/Desktop/data"

    // Make sure the driver-side Hadoop config can resolve both hdfs:// and file:// URIs.
    val hadoopConfig: Configuration = sc.hadoopConfiguration
    hadoopConfig.set("fs.hdfs.impl", classOf[org.apache.hadoop.hdfs.DistributedFileSystem].getName)
    hadoopConfig.set("fs.file.impl", classOf[org.apache.hadoop.fs.LocalFileSystem].getName)

    val rdd = sc.textFile(inputPath, 10)

    // Counts how many output partitions were actually processed.
    // NOTE(review): accumulators updated inside a transformation can be applied
    // more than once on task retry; for an exact count use it inside an action.
    val accum = sc.accumulator(0, "My Accumulator")

    rdd
      // Key each line by its source partition index so the custom partitioner
      // can redistribute the data. A plain Scala tuple boxes to the same
      // runtime representation as the original `new Tuple2[Integer, String]`,
      // and the lazy iterator avoids buffering the whole partition in memory.
      .mapPartitionsWithIndex((index, it) => it.map(line => (index, line)))
      .partitionBy(new LucenePartitioner(10))
      .mapPartitionsWithIndex((index, it) => {
        accum += 1

        // Executor-side Hadoop config: the closure cannot reuse the driver's.
        // NOTE(review): "fs.default.name" is deprecated in favour of
        // "fs.defaultFS", but both keys are still honoured; kept as-is.
        val conf = new Configuration()
        conf.set("fs.default.name", "hdfs://127.0.0.1:9000")
        conf.set("fs.hdfs.impl", classOf[org.apache.hadoop.hdfs.DistributedFileSystem].getName)
        conf.set("fs.file.impl", classOf[org.apache.hadoop.fs.LocalFileSystem].getName)

        // Per-partition index directory (renamed from `path` to avoid
        // shadowing the driver-side input path).
        val indexDir = new Path("hdfs://127.0.0.1:9000/data/index/" + index)
        println("index....=" + index)

        // Remove a stale Lucene write lock left behind by a crashed writer.
        val lock = new Path(indexDir.toString + "/write.lock")
        val fs = FileSystem.get(conf)
        if (fs.exists(lock)) {
          // Non-recursive delete: the lock is a single file. Replaces the
          // deprecated single-argument FileSystem.delete, whose default is
          // recursive and would be dangerous if `lock` ever pointed at a dir.
          fs.delete(lock, false)
        }

        // Index writing is currently disabled; kept for reference:
        //      val luceneDir = new HdfsDirectory(indexDir, conf)
        //      val analyzer = new StandardAnalyzer()
        //      val writerConfig = new IndexWriterConfig(analyzer)
        //      val indexWriter = new IndexWriter(luceneDir, writerConfig)
        //      while (it.hasNext) {
        //        val s = it.next()
        //        val document = new Document()
        //        document.add(new Field("_all", s._2, TextField.TYPE_STORED))
        //        indexWriter.addDocument(document)
        //      }
        //      indexWriter.commit()
        //      indexWriter.close()

        // Nothing is emitted downstream while indexing is disabled, so the
        // trailing count() is 0 — it exists only to force evaluation.
        Iterator.empty
      })
      .count()

    println(accum + ".............")
  }

  /**
    * Lazily-created on first object access; shared by everything in `main`.
    * Host paths, jar location, and master URL are hard-coded for a local
    * development setup (cluster variants kept as comments).
    */
  val sc = {
    //    val path = "/mnt/disk/lucene/"
    val path = "/Users/wuliao/Desktop/lucene"
    //    val path = "G:\\BigData\\data\\index"
    val conf = new SparkConf()
      .setAppName("hello")
      .set("lucene.node", "10")
      .set("lucene.path", path)
      .set("spark.driver.memory", "2048m")
      .set("spark.executor.memory", "2048m")
      //      .setJars(Array("file:///mnt/disk/jar/spark-lucene-1.0-SNAPSHOT-jar-with-dependencies.jar"))
      .setJars(Array("file:///Users/wuliao/IdeaProjects/sparklucene/target/spark-lucene-1.0-SNAPSHOT-jar-with-dependencies.jar"))
      .setMaster("local")
    //      .setMaster("spark://175.102.18.112:7077")
    new SparkContext(conf)
  }
}