package com.qing.write

import java.io.{File, FileOutputStream, PrintWriter}
import java.nio.file.Paths

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataInputStream, FileSystem, FileUtil, Path}
import org.apache.hadoop.io.IOUtils
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.log4j.{Level, Logger}
import org.apache.lucene.analysis.standard.StandardAnalyzer
import org.apache.lucene.document.{Document, Field, TextField}
import org.apache.lucene.index.{DirectoryReader, IndexWriter, IndexWriterConfig}
import org.apache.lucene.search.IndexSearcher
import org.apache.lucene.store.FSDirectory
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD

import scala.collection.mutable.ListBuffer
import scala.io.Source
import scala.util.control.Breaks.{break, breakable}

/**
  * Created by Administrator on 2017/8/16 0016.
  */
object IndexFile {

  var broadcastVar: Broadcast[Map[String, String]] = null

  /**
    * Entry point: reads log lines from HDFS/local glob `path`, then either
    * appends to an existing local lucene index (`indexFile`) or builds a new
    * one from scratch (`createIndex`), depending on whether any index
    * directory is already present on the executors.
    *
    * @param args args(0), when non-empty, overrides the default input glob
    */
  def main(args: Array[String]): Unit = {
    Logger.getRootLogger.setLevel(Level.OFF)

    // Input path: first CLI argument wins, otherwise the default log glob.
    val path =
      if (args.nonEmpty && args(0).nonEmpty) args(0)
      else "/mnt/disk/log_data/messagelog/*/"

    // Ship the lucene settings to the executors.
    broadcastVar = sc.broadcast(Map(
      "lucene.node" -> sc.getConf.get("lucene.node"),
      "lucene.path" -> sc.getConf.get("lucene.path")))

    // Pin the FS implementations explicitly: merged fat-jars sometimes lose
    // the hadoop service-loader entries for these schemes.
    val hadoopConfig: Configuration = sc.hadoopConfiguration
    hadoopConfig.set("fs.hdfs.impl", classOf[org.apache.hadoop.hdfs.DistributedFileSystem].getName)
    hadoopConfig.set("fs.file.impl", classOf[org.apache.hadoop.fs.LocalFileSystem].getName)

    val rdd = sc.textFile(path, 10)

    // Clear stale lock files left behind by a previous crashed run.
    clearLock()

    // Probe whether a lucene index already exists on the executors. This
    // reuses getLuceneCount(), which is the exact logic that used to be
    // inlined here; the probe claims lock files, so clear them afterwards.
    // (A large block of dead, commented-out sync/upload code was removed.)
    val count = getLuceneCount()
    clearLock()

    if (count > 0) {
      indexFile(rdd)   // incremental run: append into existing index slots
    } else {
      createIndex(rdd) // first run: build the 10 index directories
    }
  }

  /**
    * Deletes stale `lock` marker files under the numbered local index
    * directories on every executor.
    *
    * Each directory `<path><i>` uses a `lock` file as a cross-partition
    * mutex; a crashed task can leave it behind and permanently block that
    * slot. The job runs with 30 partitions so the cleanup reaches each
    * executor's local disk.
    *
    * @param path  root directory holding the numbered index directories
    * @param nodes number of slot indices to scan. Default 30 preserves the
    *              original `10 * 3` — NOTE(review): every other method only
    *              uses slots 0..9; scanning 10..29 is a harmless no-op but
    *              looks like a leftover; confirm the intended slot count.
    */
  def clearLock(path: String = "/mnt/disk/lucene/", nodes: Int = 30): Unit = {
    sc.parallelize(Seq("apark-lucene is a good util"), 30)
      .mapPartitions { it =>
        for (slot <- 0 until nodes) {
          val dir = new File(path + slot)
          val lock = new File(path + slot + "/lock")
          if (dir.exists() && lock.exists()) {
            lock.delete()
          }
        }
        it
      }.count()
  }


  /**
    * Builds a brand-new lucene index from `rdd`.
    *
    * Every line is tagged with its source-partition index, repartitioned by
    * [[LucenePartitioner]] into 10 buckets, and each bucket is written into
    * its own local directory `/mnt/disk/lucene/<partition>`.
    *
    * Fixes vs. original: the partition tagging no longer buffers the whole
    * partition into a ListBuffer (lazy `it.map` instead), and the
    * IndexWriter is closed in a `finally` so a parse failure cannot leak the
    * lucene write.lock.
    */
  def createIndex(rdd: RDD[String]): Unit = {
    rdd.mapPartitionsWithIndex((index, it) =>
      // Tag each record with its originating partition, lazily.
      it.map(s => (Integer.valueOf(index), s)))
      .partitionBy(new LucenePartitioner(10))
      .mapPartitionsWithIndex { (index, it) =>
        val path = "/mnt/disk/lucene/"
        val directory = FSDirectory.open(Paths.get(path + index))
        val analyzer = new StandardAnalyzer()
        val indexWriter = new IndexWriter(directory, new IndexWriterConfig(analyzer))
        try {
          it.foreach(s => indexWriter.addDocument(new SimpleParse().parse(s._2)))
          indexWriter.commit()
        } finally {
          indexWriter.close() // always release lucene's write.lock
        }
        Iterator.empty
      }.count()
  }

  /**
    * Appends `rdd` into already-existing index directories.
    *
    * Records are bucketed with [[LucenePartitioner]]; each bucket then tries
    * to claim one of the 10 local index slots by atomically creating its
    * `lock` file. Buckets that found no free slot keep their data, which is
    * handed to [[writeRDD]] for retry.
    *
    * Fixes vs. original: lazy `it.map` instead of ListBuffer buffering, and
    * the IndexWriter/lock are released in a `finally` — previously a failure
    * after `createNewFile()` leaked both the writer and the lock file,
    * permanently blocking that slot.
    */
  def indexFile(rdd: RDD[String]): Unit = {
    val dataRdd = rdd.mapPartitionsWithIndex((index, it) =>
      it.map(s => (Integer.valueOf(index), s)))
      .partitionBy(new LucenePartitioner(10))
      .mapPartitions { it =>
        val path = "/mnt/disk/lucene/"
        var isWrite = false
        breakable {
          for (a <- 0 until 10) {
            val file = new File(path + a)
            val lock = new File(path + a + "/lock")
            // A slot is free when its directory exists and nobody holds the
            // lock; File.createNewFile is atomic, so only one task wins it.
            if (file.exists && !lock.exists && lock.createNewFile()) {
              val directory = FSDirectory.open(Paths.get(path + a))
              val indexWriter =
                new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()))
              try {
                it.foreach(s => indexWriter.addDocument(new SimpleParse().parse(s._2)))
                indexWriter.commit()
              } finally {
                indexWriter.close()
                lock.delete() // free the slot even if indexing failed
              }
              isWrite = true
              break()
            }
          }
        }
        // Written buckets report empty; unwritten ones pass data on to retry.
        if (isWrite) Iterator.empty else it
      }.cache()

    writeRDD(dataRdd, dataRdd.count)
  }


  /**
    * Keeps flushing still-unwritten records of `rdd` into free index slots
    * until none remain.
    *
    * Bug fixed: the original guard was `it.length > 0`. `Iterator.length`
    * EXHAUSTS the iterator, so the write loop below it never saw a single
    * record and unwritten partitions were silently dropped (the loop then
    * terminated with count 0). `it.hasNext` performs the same emptiness test
    * without consuming anything. The no-op `.filter(_ != None)` (tuples are
    * never `None`) was removed, and the writer/lock are released in a
    * `finally` like in [[indexFile]].
    *
    * NOTE(review): `rdd` is cached, so a second loop pass re-evaluates every
    * partition from the cached data and could index already-written
    * partitions again. With 10 slots and 10 buckets a single pass normally
    * suffices — confirm before relying on multi-pass retries.
    *
    * @param rdd   partition-tagged records still to be indexed
    * @param count number of records currently outstanding
    */
  def writeRDD(rdd: RDD[(Integer, String)], count: Long): Unit = {
    var remaining = count
    while (remaining > 0) {
      remaining = rdd.mapPartitions { it =>
        if (it.hasNext) {
          val path = "/mnt/disk/lucene/"
          var isWrite = false
          breakable {
            for (a <- 0 until 10) {
              val file = new File(path + a)
              val lock = new File(path + a + "/lock")
              // atomic createNewFile doubles as the inter-partition mutex
              if (file.exists && !lock.exists && lock.createNewFile()) {
                val directory = FSDirectory.open(Paths.get(path + a))
                val indexWriter =
                  new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()))
                try {
                  it.foreach(s => indexWriter.addDocument(new SimpleParse().parse(s._2)))
                  indexWriter.commit()
                } finally {
                  indexWriter.close()
                  lock.delete() // free the slot even on failure
                }
                isWrite = true
                break()
              }
            }
          }
          // written partitions report empty; the rest stay queued for retry
          if (isWrite) Iterator.empty else it
        } else {
          it
        }
      }.count()
    }
  }


  /**
    * Shared SparkContext for the whole job, built once at object
    * initialisation. Carries the lucene settings (`lucene.node`,
    * `lucene.path`) that executors read back via the broadcast in `main`.
    */
  val sc = {
    val indexRoot = "/mnt/disk/lucene/"

    val conf = new SparkConf()
    conf.setAppName("hello")
    conf.set("lucene.node", "10")
    conf.set("lucene.path", indexRoot)
    conf.set("spark.driver.cores", "1")
    conf.set("spark.driver.memory", "4096m")
    conf.set("spark.executor.memory", "4096m")
    // Fat jar must be reachable from every worker node at this path.
    conf.setJars(Array("file:///mnt/disk/jar/spark-lucene-1.0-SNAPSHOT-jar-with-dependencies.jar"))
    conf.setMaster("spark://175.102.18.112:7077")
    // conf.setMaster("local") // uncomment for local debugging

    new SparkContext(conf)
  }


  /**
    * Recursively downloads the contents of HDFS directory `srcPath` into the
    * local directory `dstPath`, creating it if needed.
    *
    * Mutually recursive with [[download]], which dispatches files to
    * [[downloadFile]] and directories back here.
    */
  def downFromHdfs(hdfs: FileSystem, srcPath: String, dstPath: String): Unit = {
    val dstDir = new File(dstPath)
    if (!dstDir.exists()) {
      dstDir.mkdirs()
    }
    val srcFileStatus = hdfs.listStatus(new Path(srcPath))
    for (s <- FileUtil.stat2Paths(srcFileStatus)) {
      // Path.getName yields the final path component — replaces the manual
      // lastIndexOf('/') / substring dance of the original.
      val fileName = s.getName
      download(hdfs, srcPath + '/' + fileName, dstPath + '/' + fileName)
    }
  }


  /**
    * Recursively uploads the contents of local directory `srcPath` into HDFS
    * directory `dstPath`. Mutually recursive with [[upload]].
    *
    * Fix vs. original: `File.list()` returns null when `srcPath` does not
    * exist or is not a directory, which made the original throw an NPE;
    * that case is now a harmless no-op.
    */
  def upload2Hdfs(hdfs: FileSystem, srcPath: String, dstPath: String): Unit = {
    val children = Option(new File(srcPath).list()).getOrElse(Array.empty[String])
    for (s <- children) {
      upload(hdfs, srcPath + '/' + s, dstPath + '/' + s)
    }
  }


  /**
    * Copies an HDFS path to the local filesystem: a single file goes through
    * [[downloadFile]], a directory recurses via [[downFromHdfs]].
    */
  def download(hdfs: FileSystem, srcPath: String, dstPath: String): Unit =
    if (hdfs.isFile(new Path(srcPath))) downloadFile(hdfs, srcPath, dstPath)
    else downFromHdfs(hdfs, srcPath, dstPath)


  /**
    * Copies a local path into HDFS: a single file goes through
    * [[uploadFile]], a directory recurses via [[upload2Hdfs]].
    */
  def upload(hdfs: FileSystem, srcPath: String, dstPath: String): Unit =
    if (new File(srcPath).isFile) uploadFile(hdfs, srcPath, dstPath)
    else upload2Hdfs(hdfs, srcPath, dstPath)

  /**
    * Streams one HDFS file to a local file using a 4 MiB copy buffer.
    *
    * Fix vs. original: the streams are now closed in a `finally`, so an I/O
    * error during the copy can no longer leak the input/output handles.
    */
  def downloadFile(hdfs: FileSystem, srcPath: String, dstPath: String): Unit = {
    val in: FSDataInputStream = hdfs.open(new Path(srcPath))
    val out: FileOutputStream = new FileOutputStream(dstPath)
    try {
      IOUtils.copyBytes(in, out, 4096 * 1024, false)
    } finally {
      IOUtils.closeStream(in)
      IOUtils.closeStream(out)
    }
  }

  /**
    * Copies one local file into HDFS. The first argument `false` means the
    * local source file is kept (not deleted) after the copy.
    */
  def uploadFile(hdfs: FileSystem, localFile: String, hdfsFile: String): Unit =
    hdfs.copyFromLocalFile(false, new Path(localFile), new Path(hdfsFile))


  /** Recursively deletes `dir` (and everything under it) from HDFS. */
  def deleteDir(hdfs: FileSystem, dir: String): Unit = {
    val target = new Path(dir)
    hdfs.delete(target, true) // second argument: recursive delete
  }


  /**
    * Counts how many of 30 probe tasks managed to claim a free local index
    * slot (directory present, lock absent, lock file successfully created).
    * A non-zero result means at least one index directory already exists.
    *
    * The lock files created by the probe are deliberately NOT removed here;
    * callers are expected to run clearLock() afterwards.
    */
  def getLuceneCount(): Int = {
    sc.parallelize(Seq("count lucene number function"), 30)
      .mapPartitions { it =>
        val root = "/mnt/disk/lucene/"
        val slots = 10
        // Claim the first free slot, if any. `exists` short-circuits exactly
        // like the original breakable/break loop; createNewFile is atomic.
        val claimed = (0 until slots).exists { slot =>
          val dir = new File(root + slot)
          val lock = new File(root + slot + "/lock")
          dir.exists && !lock.exists && lock.createNewFile()
        }
        Iterator[Integer](if (claimed) 1 else 0)
      }.filter(_ > 0).count().toInt
  }


}