package org.newlucene.core.index;

import java.io.IOException;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Map;

import org.newlucene.core.analysis.Analyzer;
import org.newlucene.core.analysis.Token;
import org.newlucene.core.document.Document;
import org.newlucene.core.document.Field;
import org.newlucene.core.store.Directory;
import org.newlucene.core.store.OutputStream;

/**
 * Writes a single {@link Document} into a segment: stores its field values,
 * inverts the indexed fields into an in-memory posting table, then flushes
 * the sorted postings to the term dictionary and the segment's ".frq" file.
 *
 * <p>Not thread-safe: each instance buffers exactly one document at a time
 * in {@code postingTable}, so a DocumentWriter must be confined to a single
 * thread (or each document add must be externally serialized).
 */
final class DocumentWriter
{
    private final Directory directory;
    private final FieldInfos fieldInfos;
    private final Map<String, Analyzer> analyzerMap; // per-field analyzers, keyed by field name
    private final int maxFieldLength; // cap on tokens indexed per field, per document

    /**
     * @param d           directory the segment files are created in
     * @param fieldInfos  schema describing the known fields (name, number, flags)
     * @param analyzerMap analyzer to use for each indexed field, keyed by field name
     * @param mfl         maximum number of tokens to index per field in one document
     */
    DocumentWriter(Directory d, FieldInfos fieldInfos, Map<String, Analyzer> analyzerMap, int mfl)
    {
        this.directory = d;
        this.fieldInfos = fieldInfos;
        this.analyzerMap = analyzerMap;
        this.maxFieldLength = mfl;
    }

    /**
     * Adds one document to the segment: writes its stored field values,
     * inverts it into {@code postingTable}, and writes the sorted postings.
     *
     * @param segmentName base name of the segment files to write
     * @param doc         the document to index
     * @throws IOException if any segment file cannot be written
     */
    final void addDocument(final String segmentName, Document doc) throws IOException
    {
        // Write stored field values first; close the writer even on failure.
        FieldDataWriter fieldsWriter = new FieldDataWriter(directory, segmentName, fieldInfos);
        try
        {
            fieldsWriter.addDocument(doc);
        }
        finally
        {
            fieldsWriter.close();
        }

        // Invert the document into postingTable. State is reset here because
        // the table and the per-field token counters are reused per document.
        postingTable.clear();
        fieldLengths = new int[fieldInfos.size()]; // token count per field in this doc
        invertDocument(doc);

        Posting[] postings = sortPostingTable();
        writePostings(postings, segmentName);
    }

    // Keys are Terms, values are Postings; buffers one document before it is
    // written to the index. A plain HashMap suffices because this writer is
    // single-threaded per document (the legacy Hashtable only added needless
    // per-call locking).
    private final Map<Term, Posting> postingTable = new HashMap<Term, Posting>();

    private int[] fieldLengths; // number of tokens seen in this doc, per field number

    /**
     * Tokenizes each indexed field of {@code doc} and records every token's
     * term and position in {@code postingTable}. Fields absent from the
     * document are skipped; tokenization of a field stops once
     * {@code maxFieldLength} tokens have been indexed for it.
     */
    private void invertDocument(Document doc) throws IOException
    {
        for (FieldInfo fi : fieldInfos.getFieldInfoList())
        {
            String fieldText = doc.get(fi.name);
            if (fieldText == null)
            {
                continue; // field not present in this document
            }

            // Position of the next token within this field for this doc; also
            // doubles as the running token count used to enforce maxFieldLength.
            int position = fieldLengths[fi.number];

            if (fi.isFrq()) // field is indexed
            {
                Analyzer fieldAnalyzer = analyzerMap.get(fi.name);
                if (fieldAnalyzer == null)
                {
                    // Previously this was a bare NPE on the call below; fail
                    // with a message that names the offending field instead.
                    throw new IllegalStateException("no analyzer configured for indexed field: " + fi.name);
                }

                // Tokenize the field text and add each token to postingTable.
                fieldAnalyzer.set(fieldText);
                for (Token t = fieldAnalyzer.next(); t != null; t = fieldAnalyzer.next())
                {
                    addPosition(fi.name, t.termText, position++);
                    if (position > maxFieldLength) break; // truncate oversized fields
                }

                fieldLengths[fi.number] = position; // persist count for this field
            }
        }
    }

    // Reused lookup key, to avoid allocating a Term per token just to probe
    // the posting table.
    private final Term termBuffer = new Term("", "");

    /**
     * Records one occurrence of {@code termText} in {@code field} at token
     * position {@code position}, creating or extending its Posting.
     */
    private void addPosition(String field, String termText, final int position)
    {
        termBuffer.set(field, termText);
        Posting ti = postingTable.get(termBuffer);
        if (ti != null)
        { // term already seen in this doc: append the position
            int freq = ti.freq;
            if (ti.positions.length == freq)
            {
                // positions array is full; double its capacity
                ti.positions = Arrays.copyOf(ti.positions, freq * 2);
            }
            ti.positions[freq] = position;
            ti.freq = freq + 1;
        }
        else
        { // first occurrence: a fresh (immutable-key) Term is required here
            Term term = new Term(field, termText);
            postingTable.put(term, new Posting(term, position));
        }
    }

    /**
     * @return the buffered postings as an array sorted by Term, the order in
     *         which the term dictionary must be written
     */
    private Posting[] sortPostingTable()
    {
        Posting[] array = postingTable.values().toArray(new Posting[0]);
        Arrays.sort(array); // Posting is Comparable on its Term
        return array;
    }

    /**
     * Writes the sorted postings: one term-dictionary entry per term plus its
     * frequency (and, when enabled for the field, delta-encoded positions) in
     * the segment's ".frq" file.
     *
     * @param postings postings sorted by Term
     * @param segment  base name of the segment files
     * @throws IOException if the dictionary or frequency file cannot be written
     */
    private void writePostings(Posting[] postings, final String segment) throws IOException
    {
        OutputStream freqStream = null;
        TermInfosWriter tis = null;

        try
        {
            freqStream = directory.createFile(segment + ".frq");
            tis = new TermInfosWriter(directory, segment, fieldInfos);
            TermInfo ti = new TermInfo(); // reusable TermInfo carrier

            for (int i = 0; i < postings.length; i++)
            {
                Posting posting = postings[i];

                // Dictionary entry: docFreq (always 1 here — single document)
                // and the pointer into the freq file.
                ti.set(1, freqStream.getFilePointer());
                tis.add(posting.term, ti);

                // Freq-file entry, with the freq==1 case optimized to a
                // single varint.
                int freq = posting.freq;
                if (freq == 1)
                {
                    freqStream.writeVInt(1); // set low bit of doc num
                }
                else
                {
                    freqStream.writeVInt(0); // the document number
                    freqStream.writeVInt(freq); // frequency in doc
                }

                // Positions are written only when enabled for the field.
                if (fieldInfos.getFieldInfo(posting.term.field).isPos())
                {
                    int lastPosition = 0;
                    int[] positions = posting.positions;
                    for (int j = 0; j < freq; j++)
                    { // delta-encode consecutive positions
                        int position = positions[j];
                        freqStream.writeVInt(position - lastPosition);
                        lastPosition = position;
                    }
                }
            }
        }
        finally
        {
            // Nested finally so that a failure while closing freqStream can
            // no longer leak the TermInfosWriter.
            try
            {
                if (freqStream != null) freqStream.close();
            }
            finally
            {
                if (tis != null) tis.close();
            }
        }
    }
}

/**
 * Per-document information about a single Term: how many times it occurs
 * and at which token positions. Postings order themselves by their Term so
 * a document's postings can be emitted in dictionary order.
 */
final class Posting implements Comparable<Posting>
{
    Term term;       // the indexed term

    int freq;        // occurrence count within the document

    int[] positions; // token position of each occurrence; only the first
                     // freq slots are valid (array grows by doubling)

    /** Creates a posting for a term first seen at {@code position}. */
    Posting(Term t, int position)
    {
        this.term = t;
        this.freq = 1;
        this.positions = new int[] { position };
    }

    /** Orders postings by their Term. */
    @Override
    public int compareTo(Posting other)
    {
        return term.compareTo(other.term);
    }
}
