
package jarvis.fs.document;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;

/**
 * Represents a document's TF-IDF vector: an ordered list of
 * (term number, TF-IDF weight) pairs sorted by ascending term number.
 *
 * @author Jarvis.Guo
 */
public class DocumentVector {

	/** (termNum, tfidf) entries, kept sorted by ascending term number. */
	private List<OrderedTFIDF> values;
	/** True once {@link #normalize()} has scaled this vector to unit length. */
	private boolean isNormalize;
	/**
	 * Cache of the previous {@link #getTermNumTFIDF(int)} lookup so that a
	 * sequence of ascending queries resumes scanning where the last one
	 * stopped (O(n) total over a full ascending pass instead of O(n^2)).
	 */
	private int preSearchTermNum;
	/** Position in {@link #values} where the next forward scan starts. */
	private int currentPos;
	
	public DocumentVector()
	{
		values = new ArrayList<OrderedTFIDF>();
		isNormalize = false;
		preSearchTermNum = -1;// -1 so that a first query for term number 0 is treated as a forward scan
		currentPos = 0;
	}
	
	/**
	 * @param initCapacity initial capacity of the backing list
	 */
	public DocumentVector(int initCapacity)
	{
		values = new ArrayList<OrderedTFIDF>(initCapacity);
		// BUG FIX: the original constructor left the scan-cache fields at their
		// Java defaults (preSearchTermNum == 0), diverging from the no-arg
		// constructor; initialize them identically.
		isNormalize = false;
		preSearchTermNum = -1;
		currentPos = 0;
	}
	
	/**
	 * Appends an entry. Callers must add entries in ascending term-number
	 * order for {@link #getTermNumTFIDF(int)} and {@link #similarity} to work.
	 * @param f the (termNum, tfidf) entry to append
	 */
	public void addValue(OrderedTFIDF f)
	{
		values.add(f);
	}
	
	/**
	 * @return the number of non-zero entries in this vector
	 */
	public int size()
	{
		return values.size();
	}
	
	/**
	 * @param i entry index, 0 &lt;= i &lt; {@link #size()}
	 * @return the i-th (termNum, tfidf) entry
	 */
	public OrderedTFIDF getValue(int i)
	{
		return values.get(i);
	}

	/**
	 * Returns the TF-IDF weight of the term whose term number equals
	 * {@code termNum}, or 0.0 when the term is absent from this document.
	 * Ascending queries are answered by resuming the previous scan;
	 * a query with {@code termNum <= preSearchTermNum} restarts from 0.
	 * @param termNum the term number to look up
	 * @return the term's TF-IDF weight, or 0.0 if not present
	 */
	public double getTermNumTFIDF(int termNum)
	{
		int startSearchPos = 0;
		if(termNum > preSearchTermNum)//ascending query: resume the previous forward scan
		{
			startSearchPos = currentPos;
		}
		// BUG FIX: the original returned 0.0 whenever currentPos >= values.size(),
		// even for non-ascending queries that restart from position 0 — so once
		// any lookup exhausted the list, every smaller termNum wrongly got 0.0.
		preSearchTermNum = termNum;
		int i;
		for(i = startSearchPos; i < values.size(); i++)
		{
			OrderedTFIDF value = values.get(i);
			if(value.getTermNum() == termNum)//found
			{
				currentPos = i + 1;// next ascending scan starts after this entry
				return value.getTfidf();
			}
			if(value.getTermNum() > termNum)//passed it: term is absent
			{
				currentPos = i;
				return 0.0;
			}
		}
		currentPos = i;// reached the end without finding the term
		return 0.0;
	}
	
	/**
	 * Scales the vector so that a1^2 + a2^2 + ... + an^2 = 1.
	 * A zero-length vector is left unchanged (but still marked normalized)
	 * to avoid producing NaN entries via division by zero.
	 */
	public void normalize()
	{
		double sum = 0.0;
		for(OrderedTFIDF value : values)
		{
			sum += value.getTfidf() * value.getTfidf();
		}
		sum = Math.sqrt(sum);
		if(sum > 0.0)// BUG FIX: guard against division by zero (all-zero vector)
		{
			for(OrderedTFIDF value : values)
			{
				value.setTfidf(value.getTfidf() / sum);
			}
		}
		isNormalize = true;
	}
	
	/**
	 * Computes the cosine similarity between this vector and {@code vector}.
	 * Both vectors are normalized first (if not already), so the result is
	 * the dot product of the two unit vectors.
	 * Kept under its original (misspelled) name for backward compatibility;
	 * prefer {@link #similarity(DocumentVector)}.
	 * @param vector the other document vector
	 * @return the cosine similarity in [0, 1]
	 */
	public double similary(DocumentVector vector)
	{
		return similarity(vector);
	}

	/**
	 * Computes the cosine similarity between this vector and {@code vector}.
	 * Both vectors are normalized first (if not already).
	 * @param vector the other document vector
	 * @return the cosine similarity in [0, 1]
	 */
	public double similarity(DocumentVector vector)
	{
		//normalize first so the dot product equals the cosine
		if(!isNormalize()) normalize();
		if(!vector.isNormalize()) vector.normalize();
		int apos = 0, bpos = 0;
		double result = 0.0;
		// merge-walk the two sorted entry lists, multiplying matching terms
		while(apos < this.size() && bpos < vector.size())
		{
			OrderedTFIDF a = this.getValue(apos);
			OrderedTFIDF b = vector.getValue(bpos);
			if(a.getTermNum() == b.getTermNum())
			{
				result += a.getTfidf() * b.getTfidf();
				apos++;
				bpos++;
			}
			else if(a.getTermNum() > b.getTermNum())
			{
				bpos++;
			}
			else
			{
				apos++;
			}
		}
		return result;
	}

	/**
	 * @return whether this vector has been normalized to unit length
	 */
	public boolean isNormalize() {
		return isNormalize;
	}
	
	/**
	 * Builds one normalized TF-IDF vector per document in the index by
	 * enumerating every term and the documents it occurs in.
	 * Term numbers are the terms' ordinal positions in the index's TermEnum.
	 * @param indexReader an open index reader
	 * @return one DocumentVector per document number in the index
	 * @throws RuntimeException wrapping any IOException from the index
	 */
	public static DocumentVector[] getVectors(IndexReader indexReader)
	{
		int docCount = indexReader.maxDoc();
		DocumentVector[] documentsVector = new DocumentVector[docCount];//one vector per document in index
		for(int i = 0; i < docCount; i++)
		{
			documentsVector[i] = new DocumentVector();
		}
		//compute every doc's tfidf vector by enumerating the terms
		try{
			TermEnum te = indexReader.terms();
			try
			{
				int termNum = 0;//term number = the term's position in the TermEnum
				while(te.next())
				{
					Term term = te.term();
					double idf = Math.log((double)docCount / te.docFreq());//term's idf
					// BUG FIX: the original never closed the per-term TermDocs
					// (leaking one per term) nor the TermEnum itself.
					TermDocs td = indexReader.termDocs(term);
					try
					{
						while(td.next())
						{
							int docNum = td.doc();//document number
							int freq = td.freq();//tf in the document
							documentsVector[docNum].addValue(new OrderedTFIDF(termNum, freq * idf));
						}
					}
					finally
					{
						td.close();
					}
					termNum++;
				}
			}
			finally
			{
				te.close();
			}
		}
		catch(IOException ex)
		{
			// rethrow with cause preserved; printStackTrace() removed (the
			// wrapped exception already carries the full stack trace)
			throw new RuntimeException("failed to build document vectors from index", ex);
		}
		//normalize them so similarity() is a plain dot product
		for(DocumentVector v : documentsVector)
		{
			v.normalize();
		}
		return documentsVector;
	}

}
