package org.newlucene.core.index;

import java.io.IOException;
import java.io.File;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Vector;

import org.newlucene.core.analysis.Analyzer;
import org.newlucene.core.document.Document;
import org.newlucene.core.document.Field;
import org.newlucene.core.store.Directory;
import org.newlucene.core.store.FSDirectory;
import org.newlucene.core.store.InputStream;
import org.newlucene.core.store.OutputStream;
import org.newlucene.core.store.RAMDirectory;

/**
  An IndexWriter creates and maintains an index.

  The third argument to the <a href="#IndexWriter"><b>constructor</b></a>
  determines whether a new index is created, or whether an existing index is
  opened for the addition of new documents.

  In either case, documents are added with the <a
  href="#addDocument"><b>addDocument</b></a> method.  When finished adding
  documents, <a href="#close"><b>close</b></a> should be called.

  If an index will not have more documents added for a while and optimal search
  performance is desired, then the <a href="#optimize"><b>optimize</b></a>
  method should be called before the index is closed.
  */

public final class IndexWriter 
{    
	/** The maximum number of terms that will be indexed for a single field in a
	    document.  This limits the amount of memory required for indexing, so that
	    collections with very large files will not crash the indexing process by
	    running out of memory.

	    <p>By default, no more than 50,000 terms will be indexed for a field. */
	public int maxFieldLength = 50000;

	/** If non-null, information about segment merges will be printed to this. */
	public PrintStream infoStream = null;

	private Directory directory;               // where this index resides
	private FieldInfos fieldInfos;             // per-field metadata for the whole index
	private Map<String, Analyzer> analyzerMap; // field name -> analyzer used to tokenize it
	private SegmentInfos segmentInfos;         // the segments making up this index

	// Newly added documents are first written as tiny single-document segments
	// into this in-memory directory, and merged onto disk lazily; see
	// flushRamSegments() and maybeMergeSegments().
	private final Directory ramDirectory = new RAMDirectory();

	/** Determines how often segment indexes are merged by addDocument().  With
	 * smaller values, less RAM is used while indexing, and searches on
	 * unoptimized indexes are faster, but indexing speed is slower.  With larger
	 * values more RAM is used while indexing and searches on unoptimized indexes
	 * are slower, but indexing is faster.  Thus larger values (&gt; 10) are best
	 * for batched index creation, and smaller values (&lt; 10) for indexes that
	 * are interactively maintained.
	 *
	 * <p>This must never be less than 2.  The default value is 10. */
	private int mergeFactor = 10;

	/** Determines the largest number of documents ever merged by addDocument().
	 * Small values (e.g., less than 10,000) are best for interactive indexing,
	 * as this limits the length of pauses while indexing to a few seconds.
	 * Larger values are best for batched indexing and speedier searches.
	 *
	 * <p>The default value is {@link Integer#MAX_VALUE}. */
	private int maxMergeDocs = Integer.MAX_VALUE;

	// Writer-local segment-name counter; -1 means "not yet seeded from
	// segmentInfos".  See newSegmentName() for why this exists.
	private int segmentNameCounter = -1;

	/**
	 * Opens or creates an index in {@code dir}.
	 *
	 * @param dir    filesystem directory in which the index lives
	 * @param fields the fields of the index; used to build the field table when
	 *               creating, and (in both modes) to map each field name to its
	 *               analyzer
	 * @param create {@code true} to create a new, empty index (writing fresh
	 *               field and segment tables); {@code false} to open an existing
	 *               one and load its tables from disk
	 * @throws IOException if the directory cannot be opened, read or written
	 */
	public IndexWriter(File dir, Field[] fields, boolean create) throws IOException
	{
		if (create)
		{
			// Brand-new index: persist an empty field table and segment table.
			directory = new FSDirectory(dir, true);
			fieldInfos = new FieldInfos(fields);
			fieldInfos.write(directory);
			segmentInfos = new SegmentInfos();
			segmentInfos.write(directory);
		}
		else
		{
			// Existing index: load its field table and segment table.
			directory = new FSDirectory(dir, false);
			fieldInfos = new FieldInfos(directory);
			segmentInfos = new SegmentInfos();
			segmentInfos.read(directory);
		}

		// Remember which analyzer tokenizes each field.
		analyzerMap = new HashMap<String, Analyzer>(fields.length);
		for (Field field : fields)
		{
			analyzerMap.put(field.getName(), field.getAnalyzer());
		}
	}

	/** Flushes all changes to the index, closes all associated files, and closes
	    the directory that the index is stored in.
	    <p>Both directories are released in a try/finally chain so that an
	    exception during the flush can no longer leak the open handles.
	    Synchronized so it cannot interleave with a concurrent merge. */
	public final synchronized void close() throws IOException 
	{
		try
		{
			flushRamSegments(); // persist any RAM-only segments first
		}
		finally
		{
			try
			{
				ramDirectory.close();
			}
			finally
			{
				directory.close(); // closed last: the flush writes into it
			}
		}
	}

	/** Returns the number of documents currently in this index (the sum of the
	    per-segment document counts).  Synchronized because merges rewrite the
	    segment table concurrently. */
	public final synchronized int docCount() 
	{
		int count = 0;
		for (int i = 0; i < segmentInfos.size(); i++) 
		{
			count += segmentInfos.info(i).docCount;
		}
		return count;
	}

	/** Adds a document to this index.  The document is written as a
	    single-document segment in RAM; segments are merged to disk lazily by
	    {@link #maybeMergeSegments}.
	    <p>Synchronized: it mutates {@code segmentInfos}, which optimize() and
	    addIndexes() also mutate under the writer's lock. */
	public final synchronized void addDocument(Document doc) throws IOException 
	{
		DocumentWriter dw = new DocumentWriter(ramDirectory, fieldInfos, analyzerMap, maxFieldLength);
		String segmentName = newSegmentName();
		dw.addDocument(segmentName, doc);
		segmentInfos.add(new SegmentInfo(segmentName, 1, ramDirectory));
		maybeMergeSegments();
	}

	/** Generates a fresh segment name: {@code "_"} followed by a base-36
	 * counter value.
	 *
	 * <p>The previous implementation returned
	 * {@code "_" + Integer.toString(segmentInfos.getCounter(), Character.MAX_RADIX)}
	 * without ever advancing anything, so every segment created during one
	 * session received the same name and merges would collide on files.  A
	 * writer-local counter, seeded from {@code segmentInfos} on first use, now
	 * guarantees unique names within a session.
	 *
	 * <p>NOTE(review): for names to stay unique across sessions, the advanced
	 * counter must also be persisted when the segment table is written —
	 * verify that SegmentInfos exposes a way to update its counter (e.g. a
	 * setter or post-incrementing accessor) and wire it in here. */
	private final synchronized String newSegmentName() 
	{
		if (segmentNameCounter < 0)
		{
			segmentNameCounter = segmentInfos.getCounter(); // seed from the index
		}
		return "_" + Integer.toString(segmentNameCounter++, Character.MAX_RADIX);
	}

	/** Sets the merge factor; see {@link #mergeFactor} for its effect on
	 * indexing speed versus search speed.
	 *
	 * @param mergeFactor the new merge factor; must be at least 2
	 * @throws IllegalArgumentException if {@code mergeFactor < 2} — the merge
	 *         logic cannot tolerate smaller values (documented invariant) */
	public void setMergeFactor(int mergeFactor)
	{
		if (mergeFactor < 2)
		{
			throw new IllegalArgumentException("mergeFactor must be at least 2: " + mergeFactor);
		}
		this.mergeFactor = mergeFactor;
	}

	/** Sets the largest number of documents ever merged by addDocument();
	 * see {@link #maxMergeDocs}. */
	public void setMaxMergeDocs(int maxMergeDocs)
	{
		this.maxMergeDocs = maxMergeDocs;
	}

	/** Merges all segments together into a single segment, optimizing an index
	    for search.  Keeps merging the top {@code mergeFactor} segments until
	    only one segment remains, that segment lives in the real directory, and
	    it carries no deletions. */
	public final synchronized void optimize() throws IOException 
	{
		flushRamSegments();
		while (segmentInfos.size() > 1 
			|| (segmentInfos.size() == 1 && (SegmentReader.hasDeletions(segmentInfos.info(0))
			|| segmentInfos.info(0).dir != directory))
		) 
		{
			int minSegment = segmentInfos.size() - mergeFactor;
			mergeSegments(minSegment < 0 ? 0 : minSegment);
		}
	}

	/** Merges all segments from an array of indexes into this index.
	 *
	 * <p>This may be used to parallelize batch indexing.  A large document
	 * collection can be broken into sub-collections.  Each sub-collection can be
	 * indexed in parallel, on a different thread, process or machine.  The
	 * complete index can then be created by merging sub-collection indexes
	 * with this method.
	 *
	 * <p>After this completes, the index is optimized. */
	public final synchronized void addIndexes(Directory[] dirs) throws IOException 
	{
		optimize();					  // start with zero or 1 seg
		for (int i = 0; i < dirs.length; i++) 
		{
			SegmentInfos sis = new SegmentInfos();	  // read infos from dir
			sis.read(dirs[i]);
			for (int j = 0; j < sis.size(); j++) 
			{
				segmentInfos.add(sis.info(j));	  // add each info
			}
		}
		optimize();					  // final cleanup
	}

	/** Merges all RAM-resident segments to the real directory.
	    <p>Walks the segment stack from the top, counting consecutive RAM
	    segments.  May fold one trailing filesystem segment into the merge when
	    the combined size stays within {@code mergeFactor}. */
	private final void flushRamSegments() throws IOException 
	{
		int minSegment = segmentInfos.size()-1;
		int docCount = 0;
		// Scan backwards over the run of RAM-resident segments on top.
		while (minSegment >= 0 && (segmentInfos.info(minSegment)).dir == ramDirectory) 
		{
			docCount += segmentInfos.info(minSegment).docCount;
			minSegment--;
		}
		// Decide whether to include the one FS segment just below the run:
		// skip it when there is none, when it would overflow mergeFactor, or
		// when the top segment is not in RAM at all.
		if (minSegment < 0 			  // add one FS segment?
			|| (docCount + segmentInfos.info(minSegment).docCount) > mergeFactor
			|| !(segmentInfos.info(segmentInfos.size()-1).dir == ramDirectory)
		)
		{
			minSegment++;
		}
		if (minSegment >= segmentInfos.size())
		{
			return;					  // none to merge
		}
		mergeSegments(minSegment);
	}

	/** Incremental segment merger: whenever {@code mergeFactor} segments of
	    roughly equal size have accumulated, merges them into one, then retries
	    at the next size tier ({@code targetMergeDocs *= mergeFactor}) up to
	    {@code maxMergeDocs}. */
	private final void maybeMergeSegments() throws IOException 
	{
		long targetMergeDocs = mergeFactor;
		while (targetMergeDocs <= maxMergeDocs) 
		{
			// find segments smaller than current target size
			int minSegment = segmentInfos.size();
			int mergeDocs = 0;
			while (--minSegment >= 0) 
			{
				SegmentInfo si = segmentInfos.info(minSegment);
				if (si.docCount >= targetMergeDocs)
				{
					break;
				}
				mergeDocs += si.docCount;
			}

			if (mergeDocs >= targetMergeDocs)		  // found a merge to do
			{
				mergeSegments(minSegment+1);
			}
			else
			{
				break;
			}
      
			targetMergeDocs *= mergeFactor;		  // increase target size
		}
	}

	/** Pops segments off of the segmentInfos stack down to {@code minSegment},
    	merges them, and pushes the merged index onto the top of the stack. */
	private final void mergeSegments(int minSegment) throws IOException 
	{
		String mergedName = newSegmentName();
		int mergedDocCount = 0;
		if (infoStream != null)
		{
			infoStream.print("merging segments");
		}
		SegmentMerger merger = new SegmentMerger(directory, mergedName, fieldInfos);
		List<SegmentReader> segmentsToDelete = new ArrayList<SegmentReader>();
		for (int i = minSegment; i < segmentInfos.size(); i++)
		{
			SegmentInfo si = segmentInfos.info(i);
			if (infoStream != null)
			{
				infoStream.print(" " + si.name + " (" + si.docCount + " docs)");
			}
			SegmentReader reader = new SegmentReader(si, fieldInfos);
			merger.add(reader);
			// Only segments in directories we own may be deleted afterwards;
			// segments pulled in via addIndexes() belong to their source dirs.
			if ((reader.directory == this.directory) // if we own the directory
				|| (reader.directory == this.ramDirectory))
			{
				segmentsToDelete.add(reader); // queue segment for deletion
			}
			mergedDocCount += si.docCount;
		}
		if (infoStream != null)
		{
			infoStream.println();
			infoStream.println(" into " + mergedName + " (" + mergedDocCount + " docs)");
		}
		merger.merge();

		segmentInfos.setSize(minSegment); // pop old infos & add new
		segmentInfos.add(new SegmentInfo(mergedName, mergedDocCount, directory));

		segmentInfos.write(directory); // commit before deleting
		deleteSegments(segmentsToDelete); // delete now-unused segments
	}

	/** Deletes the files of every segment in {@code segments} from the
	    directory that segment lives in. */
	private final void deleteSegments(List<SegmentReader> segments) throws IOException 
	{
		for (SegmentReader reader : segments)
		{
			deleteFiles(reader.files(), reader.directory);
		}
	}

	/** Deletes each named file from {@code dir}.  (The parameter was renamed
	    from {@code directory} to stop shadowing the field of the same name.) */
	private final void deleteFiles(List<String> files, Directory dir) throws IOException 
	{
		for (String file : files)
		{
			dir.deleteFile(file);
		}
	}
}
