package IndexingLucene;

import java.io.File;
import java.io.IOException;
import java.nio.file.Paths;
import java.util.Arrays;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;
/**
 * A class for reading your index.
 */
/**
 * Read-only access to a Lucene index: docno/docid translation, posting lists,
 * term statistics, and document lengths over the {@code CONTENT} field.
 *
 * <p>NOTE(review): {@link #getPostingList(String)} and {@link #docLength(int)}
 * require term vectors to have been stored for {@code CONTENT} at indexing
 * time; if they were not, {@code getTermVector} returns {@code null} — confirm
 * against the indexer.
 */
public class MyIndexReader {

	/** Unused legacy field; kept because subclasses may reference it. */
	protected File dir;

	/** Flat list of index files on the file system. */
	private Directory directory;

	/** Composite reader over the indexes stored in {@link #directory}. */
	private DirectoryReader dreader;

	/** Searcher over the single {@link #dreader}. */
	private IndexSearcher isearcher;

	/** Cached collection-level statistics for the {@code CONTENT} field. */
	private CollectionStatistics ics;

	/**
	 * Opens the index for the requested collection type and prepares the
	 * searcher and collection statistics.
	 *
	 * @param dataType {@code "trectext"} opens the TREC text index; any other
	 *                 value opens the web index
	 * @throws IOException if the index directory cannot be opened or read
	 */
	public MyIndexReader(String dataType) throws IOException {
		// Select the on-disk index location by collection type.
		if (dataType.equals("trectext")) {
			directory = FSDirectory.open(Paths.get(Classes.Path.IndexTextDir));
		} else {
			directory = FSDirectory.open(Paths.get(Classes.Path.IndexWebDir));
		}

		dreader = DirectoryReader.open(directory);
		isearcher = new IndexSearcher(dreader);

		// Field statistics do not change while the reader is open, so fetch
		// them once and cache them.
		ics = isearcher.collectionStatistics("CONTENT");
	}

	/**
	 * Get the (non-negative) integer docid for the requested docno.
	 *
	 * @param docno external document number (stored in the {@code DOCNO} field)
	 * @return the internal Lucene docid, or -1 if the docno is not in the index
	 * @throws IOException on low-level index errors
	 */
	public int getDocid(String docno) throws IOException {
		// Exact-match query on the DOCNO field; at most one document matches.
		Query query = new TermQuery(new Term("DOCNO", docno));
		TopDocs tops = isearcher.search(query, 1);

		// BUG FIX: the original indexed scoreDocs[0] unconditionally, throwing
		// ArrayIndexOutOfBoundsException for an unknown docno instead of
		// returning -1 as documented.
		if (tops.scoreDocs.length == 0) {
			return -1;
		}
		return tops.scoreDocs[0].doc;
	}

	/**
	 * Retrieve the docno for the given integer docid.
	 *
	 * @param docid internal Lucene document id
	 * @return the stored {@code DOCNO} value, or null if the document has none
	 * @throws IOException on low-level index errors
	 */
	public String getDocno(int docid) throws IOException {
		// Loads only the stored fields of the document.
		Document doc = dreader.document(docid);
		// Document.get returns null when the field is absent.
		return (doc == null) ? null : doc.get("DOCNO");
	}

	/**
	 * Get the posting list for the requested token.
	 *
	 * <p>Each row of the returned array is {@code {docid, freq}}: the id of a
	 * document containing the token and the token's frequency in that
	 * document. Rows are sorted by docid ascending.
	 *
	 * @param token an indexed term of the {@code CONTENT} field
	 * @return the posting list, or null if the token does not occur at all
	 * @throws IOException on low-level index errors
	 */
	public int[][] getPostingList(String token) throws IOException {
		Term tm = new Term("CONTENT", token);

		// Document frequency = exact number of matching documents, so it is
		// both the early-exit test and the size of the result.
		int df = dreader.docFreq(tm);
		if (df == 0) {
			return null;
		}

		TopDocs tops = isearcher.search(new TermQuery(tm), df);
		ScoreDoc[] scoreDocs = tops.scoreDocs;

		int[][] posting = new int[df][];
		int ix = 0;

		for (ScoreDoc hit : scoreDocs) {
			int docid = hit.doc;
			int freq = 0;

			// Per-document term/frequency table for CONTENT (requires term
			// vectors to have been stored at indexing time).
			Terms vector = dreader.getTermVector(docid, "CONTENT");
			TermsEnum termsEnum = vector.iterator();
			BytesRef text;
			while ((text = termsEnum.next()) != null) {
				if (text.utf8ToString().equals(token)) {
					// Within a single document's term vector, totalTermFreq()
					// is exactly the term's frequency in that document.
					freq = (int) termsEnum.totalTermFreq();
					break; // each term occurs once in the enumeration
				}
			}
			posting[ix++] = new int[] { docid, freq };
		}

		// BUG FIX: search() ranks hits by score, not docid, so the array was
		// generally NOT in docid order as the contract requires. Sort by docid.
		Arrays.sort(posting, (a, b) -> Integer.compare(a[0], b[0]));

		return posting;
	}

	/**
	 * Return the number of documents that contain the token.
	 *
	 * @param token an indexed term of the {@code CONTENT} field
	 * @return the document frequency (0 if the token is absent)
	 * @throws IOException on low-level index errors
	 */
	public int DocFreq(String token) throws IOException {
		return dreader.docFreq(new Term("CONTENT", token));
	}

	/**
	 * Return the total number of times the token appears in the collection.
	 *
	 * @param token an indexed term of the {@code CONTENT} field
	 * @return the collection term frequency (0 if the token is absent)
	 * @throws IOException on low-level index errors
	 */
	public long CollectionFreq(String token) throws IOException {
		return dreader.totalTermFreq(new Term("CONTENT", token));
	}

	/**
	 * Get the length (total number of token occurrences) of the requested
	 * document's {@code CONTENT} field.
	 *
	 * @param docid internal Lucene document id
	 * @return the sum of all term frequencies in the document
	 * @throws IOException on low-level index errors
	 */
	public int docLength(int docid) throws IOException {
		int docLength = 0;
		// NOTE(review): throws NPE if term vectors were not indexed for this
		// document — confirm the indexer always stores them for CONTENT.
		Terms vector = dreader.getTermVector(docid, "CONTENT");
		TermsEnum termsEnum = vector.iterator();
		BytesRef text;
		// Iterate every distinct term; totalTermFreq() here is the term's
		// in-document frequency, so the sum is the document length.
		while ((text = termsEnum.next()) != null) {
			docLength += (int) termsEnum.totalTermFreq();
		}
		return docLength;
	}

	/**
	 * Get the corpus size: the total number of token occurrences in the
	 * {@code CONTENT} field across the whole collection.
	 *
	 * @return the sum of total term frequencies for the field
	 */
	public long getCorpusSize() {
		return ics.sumTotalTermFreq();
	}

	/**
	 * Release the underlying reader and directory.
	 *
	 * @throws IOException if closing either resource fails
	 */
	public void close() throws IOException {
		dreader.close();
		directory.close();
	}
}
