package org.apache.lucene.demo;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.chenlb.mmseg4j.analysis.MMSegAnalyzer;

public class JindexFiles {
	private final static Logger logger = LoggerFactory
			.getLogger(JindexFiles.class);
	// Root directory under which one index directory per document root is created.
	private static String rootDir = "E:/backup/org.apache.lucene/index/";
	// Allow-list of file extensions whose contents are fully tokenized and
	// indexed; every other file gets a fixed placeholder "contents" value.
	// NOTE: despite the name, this is NOT a list of extensions to skip.
	static List<String> datFilter = new ArrayList<String>();
	// Document root directories to index; each one gets its own Lucene index.
	static List<String> docsLst = new ArrayList<String>();

	static {
		datFilter.add("txt");

		docsLst.add("D:/backup/");
		docsLst.add("D:/download/");
		docsLst.add("D:/jskyme/");
		docsLst.add("E:/backup");
		docsLst.add("E:/jskyme");
	}

	/**
	 * Entry point: builds one Lucene index per configured document root.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		new JindexFiles().doIndex();
	}

	/** Indexes every configured document root in turn. */
	private void doIndex() {
		for (int i = 0; i < docsLst.size(); i++) {
			doIndexItem(docsLst.get(i));
		}
	}

	/**
	 * Creates (or recreates) the Lucene index for a single document root.
	 *
	 * An unreadable or missing root is logged and skipped so the remaining
	 * roots in {@code docsLst} are still indexed (previously this called
	 * {@code System.exit(1)} and aborted the whole run).
	 *
	 * @param docsPath absolute path of the document root to index
	 */
	private void doIndexItem(String docsPath) {
		String indexPath = rootDir + getIndex(docsPath);
		// Always rebuild from scratch; flip to false to append to an existing index.
		boolean create = true;
		final File docDir = new File(docsPath);
		if (!docDir.exists() || !docDir.canRead()) {
			logln("Document directory '"
					+ docDir.getAbsolutePath()
					+ "' does not exist or is not readable, please check the path");
			// Skip this root instead of exiting, so the other roots still run.
			return;
		}

		Date start = new Date();
		try {
			logln("Indexing to directory '" + indexPath + "'...");

			Directory dir = FSDirectory.open(new File(indexPath));
			// MMSegAnalyzer performs Chinese word segmentation; a
			// StandardAnalyzer(Version.LUCENE_31) would work for plain Latin text.
			Analyzer analyzer = new MMSegAnalyzer();
			IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_31,
					analyzer);
			// CREATE removes any previously indexed documents;
			// CREATE_OR_APPEND adds to an existing index.
			iwc.setOpenMode(create ? OpenMode.CREATE
					: OpenMode.CREATE_OR_APPEND);

			// Optional: for better indexing performance on large corpora,
			// raise the RAM buffer (and the JVM heap, e.g. -Xmx1g):
			// iwc.setRAMBufferSizeMB(256.0);

			IndexWriter writer = new IndexWriter(dir, iwc);
			try {
				indexDocs(writer, docDir);

				// writer.optimize() would maximize search performance but is
				// costly; only worthwhile for a mostly-static index.
			} finally {
				// FIX: close the writer even when indexDocs throws, otherwise
				// the index directory is left holding the write lock.
				writer.close();
			}

			Date end = new Date();
			System.out.println(end.getTime() - start.getTime()
					+ " total milliseconds");

		} catch (IOException e) {
			// FIX: preserve the full stack trace instead of printing only
			// e.getMessage() to stdout.
			logger.error("Indexing of '" + docsPath + "' failed", e);
		}
	}

	/** Single funnel for progress messages written to stdout. */
	private void logln(String str) {
		System.out.println(str);
	}

	/**
	 * Derives an index directory name from a document root path,
	 * e.g. {@code "D:/backup/"} becomes {@code "D-backup."}.
	 *
	 * @param docsPath document root path, e.g. {@code "D:/backup/"}
	 * @return the derived directory name (also echoed to stdout)
	 */
	private String getIndex(String docsPath) {
		String str = docsPath.replaceAll(":/", "-");
		str = str.replaceAll("/", ".");
		logln(str);
		return str;
	}

	/**
	 * Indexes the given file using the given writer, or if a directory is
	 * given, recurses over files and directories found under the given
	 * directory. Hidden directories (names starting with '.') are skipped.
	 *
	 * Each indexed document carries:
	 * <ul>
	 * <li>{@code path} — stored, not analyzed: the file's full path;</li>
	 * <li>{@code modified} — NumericField with the last-modified time in
	 * milliseconds (efficiently filterable with NumericRangeFilter);</li>
	 * <li>{@code name} — the file name, tokenized but not stored;</li>
	 * <li>{@code contents} — the tokenized file text (read as GBK) for
	 * extensions in {@code datFilter}, otherwise a fixed placeholder.</li>
	 * </ul>
	 *
	 * NOTE: This method indexes one document per input file, which is slow;
	 * for high throughput put multiple documents into each input file.
	 *
	 * @param writer
	 *            Writer to the index where the given file/dir info will be
	 *            stored
	 * @param parDir
	 *            The file to index, or the directory to recurse into to find
	 *            files to index
	 * @throws IOException if writing to the index fails
	 */
	void indexDocs(IndexWriter writer, File parDir) throws IOException {
		// do not try to index files that cannot be read
		if (parDir.canRead()) {
			if (parDir.isDirectory()) {
				if (parDir.getName().startsWith("."))
					return;
				String[] files = parDir.list();

				// list() returns null on an I/O error
				if (files != null) {
					for (int i = 0; i < files.length; i++) {
						indexDocs(writer, new File(parDir, files[i]));
					}
				}
			} else {
				logger.info("index file:" + parDir.getPath());
				FileInputStream fis;
				try {
					fis = new FileInputStream(parDir);
				} catch (FileNotFoundException fnfe) {
					// at least on windows, some temporary files raise this
					// exception with an "access denied" message;
					// checking if the file can be read doesn't help
					return;
				}

				try {
					Document doc = new Document();

					// Path: indexed (searchable) but not tokenized, no term
					// frequency or positional information — used as the exact
					// key for updateDocument below.
					Field pathField = new Field("path", parDir.getPath(),
							Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
					doc.add(pathField);

					// Last-modified time at millisecond resolution; coarsen
					// (e.g. to a yyyyMMddHH long) if that is too fine.
					NumericField modifiedField = new NumericField("modified");
					modifiedField.setLongValue(parDir.lastModified());
					doc.add(modifiedField);

					// File name, tokenized via a Reader so it is searchable
					// but not stored.
					doc.add(new Field("name",
							new StringReader(parDir.getName())));

					if (isIgnore(parDir)) {
						// Extension not in the allow-list: store a fixed
						// placeholder rather than the file contents.
						doc.add(new Field("contents",
								"this Ignore contents of file",
								Field.Store.YES,
								Field.Index.NOT_ANALYZED_NO_NORMS));
					} else {
						// Tokenize and index the file text (not stored).
						// NOTE(review): the stream is decoded as GBK — confirm
						// this matches the corpus encoding.
						doc.add(new Field("contents", new BufferedReader(
								new InputStreamReader(fis, "GBK"))));
					}

					if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
						// New index: no old copy of this document can exist,
						// so a plain add is sufficient.
						writer.addDocument(doc);
					} else {
						// Existing index: replace any old document with the
						// same exact path.
						System.out.println("updating " + parDir);
						writer.updateDocument(
								new Term("path", parDir.getPath()), doc);
					}

				} finally {
					fis.close();
				}
			}
		}
	}

	/**
	 * Decides whether a file's contents should be skipped.
	 *
	 * Inverted allow-list: returns {@code false} (do index the contents) only
	 * when the file name ends with one of the extensions in {@code datFilter};
	 * every other file returns {@code true} and gets a placeholder instead.
	 *
	 * @param file the candidate file
	 * @return {@code true} if the contents should NOT be indexed
	 */
	private boolean isIgnore(File file) {
		for (int i = 0; i < datFilter.size(); i++) {
			String dat = datFilter.get(i);
			if (file.getName().endsWith("." + dat)) {
				return false;
			}
		}
		return true;
	}
}
