package org.apache.lucene.demo;
// test
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Date;

/** Index all text files under a directory.
 * <p>
 * This is a command-line application demonstrating simple Lucene indexing.
 * Run it with no command-line arguments for usage information.
 */
public class IndexFiles {

  /** Command-line utility; not meant to be instantiated. */
  private IndexFiles() {}

  /**
   * Command-line entry point. Indexes all text files under a directory.
   *
   * Flags:
   *   -index INDEX_PATH  directory where the index is written (default "index")
   *   -docs  DOCS_PATH   directory containing the documents to index (default "test")
   *   -update            add/update documents in an existing index instead of
   *                      wiping it and creating a fresh one
   */
  public static void main(String[] args) {
    String usage = "java org.apache.lucene.demo.IndexFiles"
                 + " [-index INDEX_PATH] [-docs DOCS_PATH] [-update]\n\n"
                 + "This indexes the documents in DOCS_PATH, creating a Lucene index "
                 + "in INDEX_PATH that can be searched with SearchFiles";
    String indexPath = "index"; // where the index files are stored
    String docsPath = "test";   // where the documents to index live
    boolean create = true;      // true: rebuild the index from scratch
    // Parse command-line flags. Value-taking flags consume the next argument;
    // the i + 1 bounds check prevents ArrayIndexOutOfBoundsException when a
    // flag is given without its value (e.g. a trailing "-index").
    for (int i = 0; i < args.length; i++) {
      if ("-index".equals(args[i]) && i + 1 < args.length) {
        // -index names the file-system directory that will hold the index
        indexPath = args[++i];
      } else if ("-docs".equals(args[i]) && i + 1 < args.length) {
        // -docs names the directory of files to be indexed
        docsPath = args[++i];
      } else if ("-update".equals(args[i])) {
        // -update: keep an existing index and add/replace documents in it;
        // without it, any existing index is removed before indexing
        create = false;
      }
    }

    // NOTE: docsPath defaults to "test" above, so this guard can only fire if
    // the default is changed back to null (as in the stock Lucene demo, where
    // -docs is mandatory). Kept for compatibility with that version.
    if (docsPath == null) {
      System.err.println("Usage: " + usage);
      System.exit(1);
    }

    final File docDir = new File(docsPath);
    // Fail fast if the document directory is missing or unreadable.
    if (!docDir.exists() || !docDir.canRead()) {
      System.out.println("Document directory '" +docDir.getAbsolutePath()+ "' does not exist or is not readable, please check the path");
      System.exit(1);
    }

    Date start = new Date();
    try {
      System.out.println("Indexing to directory '" + indexPath + "'...");

      // Directory is where IndexWriter stores the index; FSDirectory keeps it
      // on the file system (other implementations exist, e.g. RAM-based).
      Directory dir = FSDirectory.open(new File(indexPath));

      // The analyzer tokenizes text and post-processes the tokens (filtering,
      // lower-casing, stop-word removal, ...). StandardAnalyzer is a good
      // general-purpose choice; pick a language-specific analyzer for
      // non-English content.
      Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_45);

      // IndexWriterConfig holds all of the IndexWriter's configuration.
      IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_45, analyzer);

      if (create) {
        // Create a new index, removing any previously indexed documents.
        iwc.setOpenMode(OpenMode.CREATE);
      } else {
        // Add new documents to an existing index (create it if absent).
        iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
      }

      // Optional: for better indexing performance, if you are indexing many
      // documents, increase the RAM buffer. But if you do this, also increase
      // the JVM max heap size (e.g. add -Xmx512m or -Xmx1g):
      //
      // iwc.setRAMBufferSizeMB(256.0);

      // The writer applies the configuration to the target directory.
      // try/finally guarantees the writer (and its file locks) are released
      // even when indexDocs throws.
      IndexWriter writer = new IndexWriter(dir, iwc);
      try {
        // indexDocs recursively walks docDir, building one Document per file
        // (path, last-modified time, tokenized contents). With -update, the
        // writer replaces any previously indexed document whose "path" term
        // matches instead of blindly adding a duplicate.
        indexDocs(writer, docDir);

        // NOTE: to maximize search performance you can optionally call
        // forceMerge here. It is a very costly operation, so it is generally
        // only worth it when the index is relatively static (i.e. you are
        // done adding documents to it):
        //
        // writer.forceMerge(1);
      } finally {
        writer.close();
      }

      Date end = new Date();
      System.out.println(end.getTime() - start.getTime() + " total milliseconds");

    } catch (IOException e) {
      // Report which exception type occurred along with its message.
      System.out.println(" caught a " + e.getClass() +
       "\n with message: " + e.getMessage());
    }
  }

  /**
   * Indexes the given file using the given writer, or if a directory is given,
   * recurses over files and directories found under the given directory.
   * 
   * NOTE: This method indexes one document per input file.  This is slow.  For good
   * throughput, put multiple documents into your input file(s).  An example of this is
   * in the benchmark module, which can create "line doc" files, one document per line,
   * using the
   * <a href="../../../../../contrib-benchmark/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.html"
   * >WriteLineDocTask</a>.
   *  
   * @param writer Writer to the index where the given file/dir info will be stored
   * @param file The file to index, or the directory to recurse into to find files to index
   * @throws IOException If there is a low-level I/O error
   */
  static void indexDocs(IndexWriter writer, File file)
    throws IOException {
    // Skip anything that cannot be read.
    if (file.canRead()) {
      if (file.isDirectory()) {
        String[] files = file.list();
        // list() returns null if an I/O error occurs.
        if (files != null) {
          for (String name : files) {
            indexDocs(writer, new File(file, name)); // recurse into children
          }
        }
      } else {

        FileInputStream fis;
        try {
          fis = new FileInputStream(file);
        } catch (FileNotFoundException fnfe) {
          // At least on Windows, some temporary files raise this exception
          // with an "access denied" message; checking canRead() doesn't help.
          return;
        }

        try {

          // Make a new, empty document.
          Document doc = new Document();

          // Add the path of the file as a field named "path". Use a field
          // that is indexed (i.e. searchable), but don't tokenize the field
          // into separate words and don't index term frequency or positional
          // information. The stored value also serves as the document's
          // identity for -update (see the Term used below).
          Field pathField = new StringField("path", file.getPath(), Field.Store.YES);
          doc.add(pathField);

          // Add the last-modified date of the file as a field named
          // "modified". LongField is indexed for efficient range filtering
          // (NumericRangeFilter). Millisecond resolution is often too fine;
          // you could instead encode year/month/day/hour/minute/second down
          // to the resolution you need.
          doc.add(new LongField("modified", file.lastModified(), Field.Store.NO));

          // Add the contents of the file to a field named "contents".
          // Passing a Reader means the text is tokenized and indexed but not
          // stored. The file is assumed to be UTF-8 encoded; if it isn't,
          // searching for special characters will fail.
          doc.add(new TextField("contents", new BufferedReader(new InputStreamReader(fis, "UTF-8"))));

          if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
            // New index, so we just add the document (no old document can be there).
            System.out.println("adding " + file);
            writer.addDocument(doc);
          } else {
            // Existing index (an old copy of this document may have been
            // indexed), so use updateDocument to replace any old document
            // matching the exact path, if present.
            System.out.println("updating " + file);
            writer.updateDocument(new Term("path", file.getPath()), doc);
          }

        } finally {
          fis.close();
        }
      }
    }
  }
}
