package Indexing;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.Map;

public class MyIndexReader {
	// YOU ARE SUGGESTED TO WRITE VERY EFFICIENT CODE HERE, OTHERWISE, YOUR MEMORY CANNOT HOLD OUR CORPUS... ->
	private String dataType; // trectext | trecweb

	// Two separate maps so a docno that happens to look like an integer id
	// can never collide with a real intDocID key (the old single-map scheme
	// stored both directions in one HashMap).
	private Map<String, String> docnoToDocid; // docNo -> intDocID (as string)
	private Map<String, String> docidToDocno; // intDocID (as string) -> docNo
	private Map<String, String> dictMap;      // term -> 0-based line index in the ".index" file

	/**
	 * Constructor: initiate the IndexReader by loading the ".docID" and
	 * ".dict" files generated in task 1 into in-memory maps. Both files are
	 * fully consumed here, so their streams are closed before returning.
	 *
	 * @param type index type, "trectext" or "trecweb"
	 * @throws IOException if either index file cannot be opened or read
	 */
	public MyIndexReader(String type) throws IOException {
		dataType = type;
		docnoToDocid = new HashMap<String, String>();
		docidToDocno = new HashMap<String, String>();
		dictMap = new HashMap<String, String>();

		// Read the ".docID" file: each line is "intDocID,docNo".
		BufferedReader brDocID = new BufferedReader(new InputStreamReader(
				new FileInputStream("data//" + dataType + ".docID")));
		try {
			String line;
			while ((line = brDocID.readLine()) != null) {
				String[] s = line.split(",");
				docidToDocno.put(s[0], s[1]); // intDocID -> docNo
				docnoToDocid.put(s[1], s[0]); // docNo -> intDocID
			}
		} finally {
			// Fully consumed: close now instead of holding it open until Close().
			brDocID.close();
		}

		// Read the ".dict" file: each line is "term,termIndex" where termIndex
		// is the 0-based line number of the term's postings line in ".index".
		BufferedReader brDict = new BufferedReader(new InputStreamReader(
				new FileInputStream("data//" + dataType + ".dict")));
		try {
			String line;
			while ((line = brDict.readLine()) != null) {
				String[] s = line.split(",");
				dictMap.put(s[0], s[1]); // term -> termIndex
			}
		} finally {
			brDict.close();
		}
	}

	/**
	 * Get the non-negative integer docid for the requested docno.
	 * If the requested docno does not exist in the index, return -1.
	 *
	 * @param docno the external document number
	 * @return the internal integer docid, or -1 if unknown
	 */
	public int GetDocid(String docno) {
		if (docno == null) {
			return -1;
		}
		String id = docnoToDocid.get(docno);
		return (id == null) ? -1 : Integer.parseInt(id);
	}

	/**
	 * Get the docno for the integer docid.
	 *
	 * @param docid the internal integer docid
	 * @return the external document number
	 * @throws IllegalArgumentException if docid is negative or not in the index
	 */
	public String GetDocno(int docid) {
		String docno = (docid < 0) ? null : docidToDocno.get(Integer.toString(docid));
		if (docno == null) {
			throw new IllegalArgumentException("Invalid docid");
		}
		return docno;
	}

	/**
	 * Get partial postings-list info, docid and tf, for the targeted term.
	 *
	 * Each ".index" line has the form "term postings" where postings is
	 * "docid:tf,pos,pos,...;docid:tf,pos,...;..." — e.g. for term "apple":
	 *
	 *   1:2,44,67;2:3,56,78,107;3:5,10,34,79,88,103;...
	 *
	 * In the returned 2-dimension array, the first dimension is per document
	 * and the second records [docid, tf]:
	 *   result[0][0] = docid of the first document the term appears in
	 *   result[0][1] = frequency of the term in that document
	 *
	 * NOTE the rows are ranked by docid from smallest to largest, because the
	 * ".index" file stores postings in that order.
	 *
	 * @param targetedTerm the term to look up
	 * @return the [docid, tf] rows, or null if the term's postings are empty
	 * @throws IllegalArgumentException if the term is null or not in the dictionary
	 * @throws IOException if the ".index" file cannot be read
	 */
	public int[][] GetPostingList(String targetedTerm) throws IOException {
		if (targetedTerm == null || !dictMap.containsKey(targetedTerm)) {
			throw new IllegalArgumentException("Invalid token.");
		}

		int ithLine = Integer.parseInt(dictMap.get(targetedTerm));

		BufferedReader br = new BufferedReader(new InputStreamReader(
				new FileInputStream("data//" + dataType + ".index")));
		String line;
		try {
			// Skip the lines preceding the targeted term's postings line.
			for (int i = 0; i < ithLine; i++) {
				br.readLine();
			}
			line = br.readLine();
		} finally {
			br.close(); // close even if readLine throws
		}

		if (line == null) {
			return null; // ".dict" points past the end of ".index"
		}

		String[] targetedLine = line.split("\\s");
		// targetedLine[0] is the term; targetedLine[1] is the postings part.
		// NOTE: the old check `pls != ""` compared references and never fired;
		// isEmpty() is the correct emptiness test.
		if (targetedLine.length < 2 || targetedLine[1].isEmpty()) {
			return null; // term present but postings list is empty
		}

		String[] pl = targetedLine[1].split(";"); // one entry per document
		int[][] result = new int[pl.length][2];   // rows: docs | cols: [docid, tf]
		for (int i = 0; i < pl.length; i++) {
			if (!pl[i].isEmpty()) {
				String[] entry = pl[i].split(":");
				result[i][0] = Integer.parseInt(entry[0]);               // intDocID
				result[i][1] = Integer.parseInt(entry[1].split(",")[0]); // tf (first value before positions)
			}
		}
		return result;
	}

	/**
	 * Return the number of documents that contain the term.
	 *
	 * @param targetedTerm the term to look up
	 * @return the document frequency (0 if the postings list is empty)
	 * @throws IOException if the ".index" file cannot be read
	 */
	public int GetDocFreq(String targetedTerm) throws IOException {
		int[][] postings = GetPostingList(targetedTerm);
		// Guard: GetPostingList returns null for an empty postings list.
		return (postings == null) ? 0 : postings.length;
	}

	/**
	 * Return the total number of times the term appears in the collection.
	 *
	 * @param targetedTerm the term to look up
	 * @return the collection frequency (0 if the postings list is empty)
	 * @throws IOException if the ".index" file cannot be read
	 */
	public long GetCollectionFreq(String targetedTerm) throws IOException {
		int[][] postings = GetPostingList(targetedTerm);
		if (postings == null) {
			return 0L;
		}
		long cf = 0L; // accumulate in long to match the return type and avoid overflow
		for (int i = 0; i < postings.length; i++) {
			cf += postings[i][1];
		}
		return cf;
	}

	/**
	 * Close the index reader. All file streams were already closed in the
	 * constructor, so only the in-memory maps need releasing.
	 *
	 * @throws IOException kept for interface compatibility with callers
	 */
	public void Close() throws IOException {
		dictMap.clear();
		docnoToDocid.clear();
		docidToDocno.clear();
	}
}