package com.qing.write

import java.io.{File, FileOutputStream, PrintWriter}
import java.nio.file.Paths

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataInputStream, FileSystem, FileUtil, Path}
import org.apache.hadoop.io.IOUtils
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.log4j.{Level, Logger}
import org.apache.lucene.analysis.standard.StandardAnalyzer
import org.apache.lucene.document.{Document, Field, TextField}
import org.apache.lucene.index.{IndexWriter, IndexWriterConfig}
import org.apache.lucene.store.FSDirectory

import scala.collection.mutable.ListBuffer
import scala.io.Source

/**
  * Created by Administrator on 2017/8/16 0016.
  */
object IndexFile {

  /**
    * Entry point: reads log lines from `path` (first CLI argument, or the
    * built-in default glob), repartitions them with [[LucenePartitioner]],
    * and writes one on-disk Lucene index per partition under
    * `<lucene.path><partition-index>`.
    */
  def main(args: Array[String]): Unit = {
    Logger.getRootLogger.setLevel(Level.OFF)

    // Input glob; overridable by the first CLI argument.
    //    "hdfs://175.102.18.112:8020/home/wuliao/log/data"
    //    "/mnt/disk/log_data/messagelog/*/"
    var path: String = "/Users/wuliao/Desktop/messagelog/*/"
    if (args.length > 0 && args(0).nonEmpty) {
      path = args(0)
    }

    val hadoopConfig: Configuration = sc.hadoopConfiguration
    hadoopConfig.set("fs.hdfs.impl", classOf[org.apache.hadoop.hdfs.DistributedFileSystem].getName)
    hadoopConfig.set("fs.file.impl", classOf[org.apache.hadoop.fs.LocalFileSystem].getName)

    // BUG FIX: the original closures called sc.getConf.get(...) inside the
    // tasks. SparkContext is not serializable and must never be captured by a
    // closure shipped to executors; read the settings once on the driver so
    // the closures below capture plain serializable values instead.
    val numNodes: Int = sc.getConf.get("lucene.node").toInt
    val luceneDir: String = sc.getConf.get("lucene.path")

    sc.textFile(path, numNodes)
      // Key every line with its source partition index so LucenePartitioner
      // can route it to the target index shard.
      .mapPartitionsWithIndex((index, it) => it.map(line => (index, line)))
      .partitionBy(new LucenePartitioner(numNodes))
      .mapPartitionsWithIndex((index, it) => {
        // Each output partition owns one local Lucene index directory.
        val directory = FSDirectory.open(Paths.get(luceneDir + index))
        val analyzer = new StandardAnalyzer()
        val indexWriter = new IndexWriter(directory, new IndexWriterConfig(analyzer))
        try {
          // One SimpleParse per line, matching the original behaviour
          // (SimpleParse may keep per-parse state — not verified here).
          it.foreach { case (_, line) => indexWriter.addDocument(new SimpleParse().parse(line)) }
          indexWriter.commit()
        } finally {
          // BUG FIX: the original leaked the writer/directory when
          // addDocument or commit threw; close them unconditionally.
          indexWriter.close()
          directory.close()
        }
        // The original returned the already-exhausted input iterator (always
        // empty); yield the partition index so count() reflects real work.
        Iterator.single(index)
      })
      .count() // action: forces the whole pipeline to execute
  }

  /**
    * Driver-side SparkContext, created when this object is first initialised.
    * NOTE(review): master URL and jar path are hard-coded — consider moving
    * them to spark-submit configuration.
    */
  val sc: SparkContext = {
    val path = "/mnt/disk/lucene/"
    //    val path = "/Users/wuliao/Desktop/lucene/"
    val conf = new SparkConf()
      .setAppName("hello")
      .set("lucene.node", "10")
      .set("lucene.path", path)
      .set("spark.driver.cores", "1")
      .set("spark.driver.memory", "4096m")
      .set("spark.executor.memory", "4096m")
      .setJars(Array("file:///mnt/disk/jar/spark-lucene-1.0-SNAPSHOT-jar-with-dependencies.jar"))
      //      .setJars(Array("file:///Users/wuliao/IdeaProjects/sparklucene/target/spark-lucene-1.0-SNAPSHOT-jar-with-dependencies.jar"))
      //      .setMaster("local")
      .setMaster("spark://175.102.18.112:7077")
    new SparkContext(conf)
  }

  // NOTE(review): the large commented-out HDFS sync helpers (time-stamp
  // comparison, download/upload/delete of index directories) that used to
  // live here were dead code; recover them from version control if the HDFS
  // round-trip is ever reinstated.
}