import java.util.List;

import Classes.Document;
import Classes.Query;
import IndexingLucene.MyIndexReader;
import PseudoRFSearch.*;
import SearchLucene.*;

public class HW4Main {

	/**
	 * Entry point: opens the "trectext" index, runs the pseudo relevance
	 * feedback retrieval model over every query produced by {@code ExtractQuery},
	 * and prints each ranked result in TREC run format
	 * ({@code topicId Q0 docNo rank score MYRUN}), followed by the total
	 * search time in minutes.
	 *
	 * @param args unused command-line arguments
	 * @throws Exception if opening the index, extracting queries, or searching fails
	 */
	public static void main(String[] args) throws Exception {

		// 1. Open index, initialize the pseudo relevance feedback retrieval model, and extract queries
		MyIndexReader ireader = new MyIndexReader("trectext");
		try {
			PseudoRFRetrievalModel PRFSearchModel = new PseudoRFRetrievalModel(ireader);
			ExtractQuery queries = new ExtractQuery();

			// 2. Begin searching; count queries so the summary reports the real
			// number instead of a hard-coded "4".
			int queryCount = 0;
			long startTime = System.currentTimeMillis();
			while (queries.hasNext()) {
				Query aQuery = queries.next();
				queryCount++;
				// Top 100 results per query, using 20 pseudo-relevant docs and feedback weight 0.4.
				List<Document> rankedList = PRFSearchModel.RetrieveQuery(aQuery, 20, 100, 0.4);
				if (rankedList != null) {
					int rank = 1;
					for (Document doc : rankedList) {
						System.out.println(aQuery.getTopicId() + " Q0 "
								+ doc.getDocNo() + " "
								+ rank + " "
								+ doc.getDocScore() + " MYRUN");
						rank++;
					}
				}
			}
			long endTime = System.currentTimeMillis();

			// 3. Output running time
			System.out.println("\n\n" + queryCount + " queries search time: "
					+ (endTime - startTime) / 60000.0 + " min");
		} finally {
			// Close the index even if searching throws, so the reader is never leaked.
			ireader.close();
		}
	}
}