package domain;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.StringTokenizer;

import model.newmodel.PostingsList;
import model.newmodel.Term;

/**
 * Handles all interaction with existing index files: merges the block
 * dictionaries, inserts TF-IDF scores, and builds clusters.
 *
 * @author Ivan Khrisanov, Segriy Samus, Boulat Oulmachev; latest revisions: Fabrice Bloncourt, Paul Smelser and Simon Symeonidis
 * @since December 3, 2010
 * @version This version added scoring using TF-IDF and clustering.
 */
public class BlockMerger {

	/**
	 * 
	 * @param path The local path to the dictionary files.
	 * @throws Exception
	 * Uses java's IO to merge dictionaries. The folders are taken from the directory indicated by the parameter "path' and a new file is created.
	 */
    public static void execute(String path) throws Exception {

        long startTime = System.currentTimeMillis();
        System.out.print("Merging...");

        /*
         * Opening and checking directory
         */
        String tmpDictPath = path + "tmp-index/";
        File dir = new File(tmpDictPath);
        if (!dir.exists()) {
            System.out.println("Wrong path: " + tmpDictPath + "\nNothing to merge\nPlease check your path");
            System.exit(0);
        }

        /*
         *  Getting BufferedReader for each block (i.e. temporary dictionary)
         */
        String[] files = dir.list();
        int chkDictFiles = 0;
        List<BufferedReader> blockReaders = new ArrayList<BufferedReader>();
        for (String file : files) {
            if (file.matches("([^\\s]+(\\.(?i)(dict))$)")) {
                chkDictFiles++;
                blockReaders.add(new BufferedReader(new FileReader(new File(dir + "/" + file))));
            }
        }
        if (chkDictFiles == 0) {
            System.out.println("No dict files found on the path: " + tmpDictPath + "\nNothing to merge\nPlease check your path");
            System.exit(0);
        }

        /*
         *  Reading first term-posting list pair from each block (i.e. temporary dictionary)
         */
        List<TermListPair> currentTermPostingListPairs = new ArrayList<TermListPair>(blockReaders.size());
        for (int i = 0; i < blockReaders.size(); i++) {
            currentTermPostingListPairs.add(i, new TermListPair(blockReaders.get(i).readLine()));
        }
        /*
         *  Preparing finalindex.dict (final merged index) file to write into
         */
        FileOutputStream fos = new FileOutputStream(path + "finalindex.dict");
        OutputStreamWriter osw = new OutputStreamWriter(fos, "UTF-8");

        /*
         * For each block
         */
        while (blockReaders.size() > 0) {
            /*
             *  To hold index or indices of block(s) where the next lexicographically smallest term is found
             */
            ArrayList<Integer> indicesOfBlocksWithSmallestNextTerm = new ArrayList<Integer>(blockReaders.size());

            indicesOfBlocksWithSmallestNextTerm.add(0);
            String smallestWord = currentTermPostingListPairs.get(0).getTerm();
            /*
             *  Find the next smallest term and the associated posting lists
             */
            for (int i = 1; i < currentTermPostingListPairs.size(); i++) {
                String currentWord = currentTermPostingListPairs.get(i).getTerm();
                if (currentWord.compareTo(smallestWord) < 0) {
                    indicesOfBlocksWithSmallestNextTerm.clear();
                    indicesOfBlocksWithSmallestNextTerm.add(i);
                    smallestWord = currentWord;
                } else if (currentWord.compareTo(smallestWord) == 0) {
                    indicesOfBlocksWithSmallestNextTerm.add(i);
                }
            }

            /*
             *  Merge the posting lists from the different blocks for the current term and read the next line from the blocks
             *  that were used to retrieve the current term
             */
            PostingsList mergedPostingList = null;
            osw.write(currentTermPostingListPairs.get(indicesOfBlocksWithSmallestNextTerm.get(0)).getTerm());


            for (int i = indicesOfBlocksWithSmallestNextTerm.size() - 1; i >= 0; i--) {
                int indexOfBlockToProcess = indicesOfBlocksWithSmallestNextTerm.get(i);

                /*
                 * 'entry-safe' method merge is used instead of binary union.
                 * See PostingsList.java for more details.
                 */
                mergedPostingList = PostingsList.merge(mergedPostingList, currentTermPostingListPairs.get(indexOfBlockToProcess).getPostingsList());

                String szTermListPair = blockReaders.get(indexOfBlockToProcess).readLine();
                if (szTermListPair == null) {
                    currentTermPostingListPairs.remove(indexOfBlockToProcess);
                    blockReaders.get(indexOfBlockToProcess).close();
                    blockReaders.remove(indexOfBlockToProcess);
                } else {

                    TermListPair termListPair = new TermListPair(szTermListPair);
                    currentTermPostingListPairs.set(indexOfBlockToProcess, termListPair);
                }
            }

            osw.write(mergedPostingList.toString() + "\n");
        }
        osw.close();

        System.out.println(" (" + (System.currentTimeMillis() - startTime) + " ms.)");
    }

    /**
     * 
     * @param path The path to the file to be read and clustered
     * @throws IOException
     * Uses java's IO to cluster documents in a .dict file created by the function "execute".
     */
    public static void cluster(String path) throws IOException{
    	
    	BufferedReader reader = new BufferedReader(new FileReader(new File(path + "/finalindexidf.dict")));
        FileOutputStream fos = new FileOutputStream(path + "finalindexcluster.dict");
        OutputStreamWriter osw = new OutputStreamWriter(fos, "UTF-8");
        
        int i=0;//iterator for the while loop
        float jFloat=0;
        String textLine;
        ArrayList<String> arr=new ArrayList<String>();
        ArrayList<String> clusterArray= new ArrayList<String>();
        String tokens[];
        
        /*
         * creating array to be clustered
         */
        while ((textLine = reader.readLine()) != null) {
        	tokens=textLine.split(":");	
        	arr.add(tokens[2]);
        	arr.add(tokens[1] + ":" + tokens[0]);
        }
        /*
         * selecting centroid
         */
        for(i=0; i<(arr.size()-100);i=i+100){ // must be even
        	clusterArray.add(arr.get(i) + ":" + arr.get(i+1));
        	arr.remove(i);
        	arr.remove(i);
        }
        
        Collections.sort(clusterArray);
        
        /*
         * consolidating repeat centroids
         */
        for(i=0; i<clusterArray.size()-1; i++)
        {
        	if ( clusterArray.get(i).split(":")[0].equals(clusterArray.get(i+1).split(":")[0]) )
        	{
        	  clusterArray.set(i, clusterArray.get(i) + "#" + clusterArray.get(i+1));
        	  clusterArray.remove(i+1);
        	  i--;
        	}
        }
        /*
         * iterating though the terms and choosing the closest centroid
         */
        for(i=0; i<(arr.size());i=i+2)
        {
        	boolean check=true;
        	int j=1;//iterator integer for while loop
        	float insert = Float.parseFloat(arr.get(i).split(":")[0]);//the TF-IDF for the term being added to a cluster
        	/*
        	 * iterates through lists and places the term in the closest matching cluster
        	 */
        	while(insert<(Float.parseFloat(clusterArray.get(j).split(":")[0]))){
        		if(clusterArray.size()==j+1){
        			if(insert<=(Float.parseFloat(clusterArray.get(1).split(":")[0]))){
        				clusterArray.set(0, clusterArray.get(0)+"#"+arr.get(i)+":"+arr.get(i+1));
        			}
        			else
        			{
        			clusterArray.set(j, clusterArray.get(j)+"#"+arr.get(i)+":"+arr.get(i+1));
        			}
        			check=false;
        			break;
        		}
        		j++;
        		jFloat=Float.parseFloat(clusterArray.get(j).split(":")[0]);
        	}
        	if (check)
        	{
        		if(insert==jFloat)
        		{
        			clusterArray.set(j, clusterArray.get(j)+"#"+arr.get(i)+":"+arr.get(i+1));
        		}
        		else
        		{
        			float lower=jFloat-insert;
        			float higher=Float.parseFloat(clusterArray.get(j+1).split(":")[0])-insert;
        			if(lower<higher)
        			{
        				clusterArray.set(j, clusterArray.get(j)+"#"+arr.get(i)+":"+arr.get(i+1));
        			}
        			else
        			{
        				clusterArray.set(j, clusterArray.get(j+1)+"#"+arr.get(i)+":"+arr.get(i+1));
        			}
        		}
        	}
        	
        }
        for(i=0; i<clusterArray.size(); i++){
        	osw.write(clusterArray.get(i)+"\n");
        }
        osw.close();
        fos.close();
    }
    /**
     * 
     * @param path The path to the file needing TF-IDF calculations
     * @param totalCount Number of documents in the collection
     * @throws Exception
     * Uses Java's IO to calculate the TF-IDF and writes the result into a new file
     */
    public static void calculate(String path, int totalCount) throws Exception{

        System.out.print("Calculating...");
        /*
         *  Opening an dict file
         */
        BufferedReader reader = new BufferedReader(new FileReader(new File(path + "/finalindex.dict")));
        FileOutputStream fos = new FileOutputStream(path + "finalindexidf.dict");
        OutputStreamWriter osw = new OutputStreamWriter(fos, "UTF-8");
        
        /*
         * creating parameters needed for functions
         */
        String textLine;
        int separatorIndex = 0;
        int docId = 0;
        int termOcc = 0;
        int idf=0;
        float tfidf;
        /*
         *  Opening and checking directory
         */
        File dir = new File(path);
        if (!dir.exists()) {
            System.out.println("Wrong path: " + path + "\nNothing to calculate\nPlease check your path");
            System.exit(0);
        }

        
        /*
         * reading input file and computing TF-IDF
         */
        while ((textLine = reader.readLine()) != null) {
            	separatorIndex = textLine.lastIndexOf("[");
                if (separatorIndex!=0){
                	int docCount=0;
            		Term term = Term.newInstance(textLine.substring(0, separatorIndex));
	                textLine=textLine.substring(separatorIndex+1,textLine.length()-1);
	            	System.out.println(term);
	                StringTokenizer st = new StringTokenizer(textLine);
	                while (st.hasMoreTokens()) {
	                	docCount++;
	                    String termInfo = st.nextToken();
	                    termInfo = termInfo.replace(",", "");
	                    separatorIndex = termInfo.lastIndexOf(":");
	                    docId = Integer.parseInt(termInfo.substring(0,separatorIndex));
	                    termOcc = Integer.parseInt(termInfo.substring(separatorIndex+1,termInfo.lastIndexOf(".")));
	                    
	                }
	                
	                idf = totalCount / docCount;
	                tfidf = (float) Math.log(termOcc * idf);
	                System.out.println("tfidf: "+tfidf);
	                osw.write(docId+ ":"+term+":"+tfidf+"\n");
	            }
                
        }
        osw.close();
        fos.close();
    }
    /**
     * 
     * @author Ivan Khrisanov | Segriy Samus | Boulat Oulmachev
     *
     */
    public static class TermListPair {

        private Term term;
        private PostingsList pL;

        public TermListPair(String termListPairString) {
            this.term = Term.fromIndexString(termListPairString);
            this.pL = PostingsList.fromIndexString(termListPairString);
        }

        public String getTerm() {
            return term.getTerm();
        }

        public PostingsList getPostingsList() {
            return pL;
        }

        @SuppressWarnings("unused")
		private TermListPair() {
        }
    }
}
