package ca.uwindsor.cs.deepweb.utility.wikipedia;

import java.io.File;
import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.LockObtainFailedException;

import ca.uwindsor.cs.deepweb.estimation.DictionariesFilesContentIndexer;
import ca.uwindsor.cs.deepweb.estimation.FilesContentIndexer;

public class WikiDocumentIndexer extends DictionariesFilesContentIndexer {

	/**
	 * Creates an indexer for plain-text wiki dump files.
	 *
	 * @param sourcepath root directory containing the files to index
	 * @param indexdir   directory the Lucene index is written to
	 * @param encoding   character encoding of the source files
	 * @param verbose    when {@code true}, print each file as it is indexed
	 * @param create     when {@code true}, create a fresh index
	 * @param ext        accepted file-name extensions (without the dot)
	 */
	public WikiDocumentIndexer(String sourcepath, String indexdir,
			String encoding, boolean verbose, boolean create, String[] ext) {
		super(sourcepath, indexdir, encoding, verbose, create, ext);
	}

	/**
	 * Recursively walks {@code dir}, parsing every regular file with an
	 * accepted extension via {@link WikiDocumentHandler} and adding one
	 * Lucene document per file. Files that fail to parse are reported to
	 * stderr and skipped; index-level failures abort the JVM (historical
	 * behavior kept).
	 *
	 * @param dir directory to index recursively
	 */
	protected void generateIndex(File dir) {
		try {
			File[] dataFiles = dir.listFiles();
			if (dataFiles == null) {
				// BUG FIX: listFiles() returns null when dir is unreadable or
				// not a directory; the original dereferenced it and threw NPE.
				System.err.println("Cannot list directory: " + dir.getPath());
				return;
			}
			for (File file : dataFiles) {
				if (file.isDirectory()) {
					generateIndex(file);
				} else if (file.isFile() && isValidExtension(file.getName())) {
					indexFile(file);
				}
			}
		} catch (CorruptIndexException e) {
			e.printStackTrace();
			System.exit(1);
		} catch (LockObtainFailedException e) {
			e.printStackTrace();
			System.exit(1);
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(1);
		}
	}

	/**
	 * Parses a single wiki file and adds its document (ID, title, size,
	 * tokenized content) to {@code indexWriter}. Parse failures are logged
	 * and the file is skipped.
	 *
	 * @param file regular file with an accepted extension
	 * @throws IOException if the canonical path cannot be resolved or the
	 *                     index write fails
	 */
	private void indexFile(File file) throws IOException {
		WikiDocumentHandler handler = new WikiDocumentHandler(file, encoding);
		try {
			handler.processContent();
		} catch (Exception e) {
			System.err.println("Error on parsing: " + file.getCanonicalPath());
			return;
		}

		// Collapse whitespace runs so the stored size reflects normalized text.
		String content = handler.getContent().replaceAll("\\s+", " ").trim();
		int size = content.length();

		if (isVerbose) {
			System.out.println("Indexing file " + file.getCanonicalPath());
		}

		// Use the bare file name (no directory, no extension) as document ID.
		String filepath = file.getCanonicalPath();
		int sepIdx = filepath.lastIndexOf(File.separator);
		int dotIdx = filepath.lastIndexOf('.');
		// BUG FIX: guard against a missing extension, or a '.' appearing only
		// in a parent directory name — the original substring call threw
		// StringIndexOutOfBoundsException in those cases.
		if (dotIdx <= sepIdx) {
			dotIdx = filepath.length();
		}
		String word = filepath.substring(sepIdx + 1, dotIdx);

		Document document = new Document();
		document.add(new Field(FilesContentIndexer.FIELD_ID, word,
				Field.Store.YES, Field.Index.UN_TOKENIZED));
		document.add(new Field(FilesContentIndexer.FIELD_TITLE, handler
				.getTitle().replaceAll("\\s+", " ").trim(), Field.Store.YES,
				Field.Index.UN_TOKENIZED));
		document.add(new Field(FilesContentIndexer.FIELD_SIZE, Integer
				.toString(size), Field.Store.YES, Field.Index.UN_TOKENIZED));
		document.add(new Field(FilesContentIndexer.FIELD_CONTENT, content,
				Field.Store.NO, Field.Index.TOKENIZED,
				Field.TermVector.WITH_POSITIONS_OFFSETS));
		indexWriter.addDocument(document);
		globalID++;
	}

	/**
	 * Command-line entry point.
	 * Options: {@code --SourceDir=dir --IndexDir=dir --extension=txt,html,...
	 * [--encoding=enc] [-verbose] [-create] [-recursive]}.
	 */
	public static void main(String[] args) {
		// BUG FIX: the original unconditionally overwrote args with hard-coded
		// debug paths (D:\Research\... / F:\indexes\...), making it impossible
		// to pass real command-line arguments. The debug block is removed.
		String sourceDir = "";
		String indexDir = "";
		String encoding = "UTF-8";
		boolean isVerbose = false;
		boolean isCreate = true;
		boolean isRecursive = false;
		String[] ext = new String[] { "txt" };

		if (args.length < 2) {
			System.out
					.println("usage: java -jar WikiDocumentIndexer --SourceDir=sourcefiledirectory --IndexDir=indexfiledirectory --extension=txt,html,... [--encoding=encoding](default=UTF-8) [-verbose] [-create] !");
			System.exit(0);
		}

		for (String arg : args) {
			if (arg.startsWith("--SourceDir=")) {
				sourceDir = arg.substring("--SourceDir=".length());
			} else if (arg.startsWith("--IndexDir=")) {
				indexDir = arg.substring("--IndexDir=".length());
			} else if (arg.startsWith("--encoding=")) {
				encoding = arg.substring("--encoding=".length());
			} else if (arg.equals("-create")) {
				isCreate = true;
			} else if (arg.equals("-verbose")) {
				isVerbose = true;
			} else if (arg.startsWith("--extension=")) {
				ext = arg.substring("--extension=".length()).split(",");
			} else if (arg.equals("-recursive") || arg.equals("-resursive")) {
				// BUG FIX: was endsWith("-resursive") — a substring match on a
				// misspelled flag. The misspelling is still accepted for
				// backward compatibility. NOTE(review): the flag is currently
				// unused — generateIndex always recurses; confirm intent.
				isRecursive = true;
			}
		}

		if (sourceDir.equals("") || indexDir.equals("")) {
			System.out
					.println("usage: java -jar WikiDocumentIndexer SourceDir=sourcefiledirectory IndexDir=indexfiledirectory [encoding=encoding](default=UTF-8) [-verbose]!");
			System.exit(0);
		}

		WikiDocumentIndexer indexer = new WikiDocumentIndexer(sourceDir,
				indexDir, encoding, isVerbose, isCreate, ext);
		indexer.generateIndex();
	}

}
