package model;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;

import utils.ScoredEntities;
import utils.Stemmer;

public class Collection {
	/** docId -> bag of words for that document (term -> raw term frequency). */
	protected HashMap<String, Map<String, Integer>> collection;
	/** docId -> current relevance score; populated by {@link #rankDocsBM25}. */
	private HashMap<String, Double> docs = new HashMap<String, Double>();
	/** term -> document frequency (number of documents containing the term). */
	private ScoredEntities vocabulary;
	/** Average document length (in tokens) across the whole collection. */
	private double avgdl = 0;
	// Standard BM25 free parameters (k1 in the usual [1.2, 2.0] range, b = 0.75).
	private final double k1 = 1.5;
	private final double b = 0.75;

	/**
	 * Builds the collection from all files found at the given path.
	 * Sub-directories are NOT descended into.
	 */
	public Collection(String filePath, boolean ignoreCase, boolean justNoun,
			boolean enableStemming) {
		collection = new HashMap<String, Map<String, Integer>>();
		vocabulary = new ScoredEntities();
		load(filePath, false, ignoreCase, justNoun, enableStemming);
		initCollectionParameters(ignoreCase, enableStemming);
	}

	/**
	 * Builds the collection from an explicit list of file paths.
	 */
	public Collection(List<String> fileList, boolean ignoreCase,
			boolean justNoun, boolean enableStemming) {
		collection = new HashMap<String, Map<String, Integer>>();
		vocabulary = new ScoredEntities();
		load(fileList, ignoreCase, justNoun, enableStemming);
		initCollectionParameters(ignoreCase, enableStemming);
	}

	/**
	 * Loads the documents from a given path into {@link #collection}.
	 * Exits the JVM if no files are found (historical behavior, kept for
	 * compatibility with existing callers).
	 * 
	 * @param filePath
	 *            The path to the collection
	 * @param includeSubDir
	 *            Whether to include files in sub-directories
	 */
	private void load(String filePath, boolean includeSubDir,
			boolean ignoreCase, boolean justNoun, boolean enableStemming) {
		List<String> docs = addDirectory(filePath, includeSubDir);
		System.out.println(docs.size() + " documents found at " + filePath);
		if (docs.size() != 0) {
			for (String document : docs) {
				TaggedSlice doc = new TaggedSlice(new File(document));
				collection.put(doc.getId(),
						doc.toWords(ignoreCase, justNoun, enableStemming));
			}
		} else {
			System.out.println("No files found at " + filePath + " !");
			System.exit(0);
		}
	}

	/**
	 * Loads the documents from a list of file paths into {@link #collection}.
	 * Exits the JVM if the list is empty (historical behavior, kept for
	 * compatibility with existing callers).
	 */
	private void load(List<String> files, boolean ignoreCase, boolean justNoun,
			boolean enableStemming) {
		System.out.println(files.size() + " documents loaded from the list");
		if (files.size() != 0) {
			for (String document : files) {
				TaggedSlice doc = new TaggedSlice(new File(document));
				collection.put(doc.getId(),
						doc.toWords(ignoreCase, justNoun, enableStemming));
			}
		} else {
			System.out.println("No files in the given list.");
			System.exit(0);
		}
	}

	/**
	 * Collects the files under a path. All files, including those in the
	 * sub-directories of the path, are included if includeSubDir is true.
	 * If the path denotes a regular file, that single file is returned.
	 * 
	 * @param dir
	 *            The path to the collection
	 * @param includeSubDir
	 *            Whether to descend into sub-directories
	 * @return The list of file paths found (never null)
	 */
	public static List<String> addDirectory(String dir, boolean includeSubDir) {
		char sep = getSeparator();

		// Normalize so that dir always ends with the separator.
		if (dir.charAt(dir.length() - 1) != sep) {
			dir = dir.concat("" + sep);
		}
		List<String> files = new ArrayList<String>();
		File root = new File(dir);
		if (root.isDirectory()) {
			String[] entries = root.list();
			// list() returns null on I/O error; treat as an empty directory.
			if (entries != null) {
				for (String entry : entries) {
					File newItem = new File(dir + entry);
					if (newItem.isFile()) {
						files.add(dir + entry);
					} else if (newItem.isDirectory() && includeSubDir) {
						// dir already ends with the separator, so do not add
						// another one (previous code produced "dir//entry").
						files.addAll(addDirectory(dir + entry, true));
					}
				}
			}
		} else {
			files.add(dir);
		}
		return files;
	}

	/**
	 * Computes the collection-wide statistics needed by BM25: the document
	 * frequency of every term (stored in {@link #vocabulary}) and the average
	 * document length {@link #avgdl}. Also registers every docId in
	 * {@link #docs} with a zero score.
	 */
	public void initCollectionParameters(boolean ignoreCase,
			boolean enableStemming) {
		System.out.println("Number of documents: " + collection.size());
		for (String docId : collection.keySet()) {
			Map<String, Integer> doc = collection.get(docId);
			int docLen = 0;
			for (String word : doc.keySet()) {
				// Document length counts token occurrences, not unique words.
				docLen += doc.get(word);
				if (ignoreCase) {
					word = word.toLowerCase();
				}
				if (enableStemming) {
					// NOTE(review): a fresh Stemmer per word is conservative;
					// hoisting it would only be safe if Stemmer is stateless
					// across calls — confirm before changing.
					Stemmer s = new Stemmer();
					word = s.stem(word);
				}
				if (!vocabulary.hasWord(word)) {
					vocabulary.addWord(word, 0);
				}
				// One increment per (document, unique word) pair, i.e. this
				// accumulates document frequency, not collection frequency.
				vocabulary.updateWord(word, 1);
			}
			avgdl += docLen;
			docs.put(docId, 0.0);
		}
		avgdl = avgdl / collection.size();
		System.out.println("Number of unique words: " + vocabulary.size());
		System.out.println("Average document length: " + avgdl);
		// vocabulary.printVocabulary(this);
	}

	/**
	 * Ranks documents against a query with the BM25 weighting model and
	 * stores the per-document scores in {@link #docs}. Documents with a
	 * non-zero score are printed, followed by a hit count.
	 * 
	 * @param query
	 *            The query to be matched with
	 * @param enableStemming
	 *            Whether the query terms should be stemmed
	 */
	public void rankDocsBM25(Query query, boolean enableStemming) {
		String[] queryTerms = query.toArray(enableStemming);
		double N = docs.size();
		for (String docId : docs.keySet()) {
			// Hoisted: the same document map is read throughout this pass.
			Map<String, Integer> doc = collection.get(docId);
			double queryScore = 0;
			double docLen = 0;
			for (String word : doc.keySet()) {
				docLen += doc.get(word);
			}
			for (String term : queryTerms) {
				double docFrequency = vocabulary.getScore(term);
				// Robertson/Sparck Jones IDF: log((N - df + 0.5) / (df + 0.5)).
				double termScore = Math.log((N - docFrequency + 0.5)
						/ (docFrequency + 0.5));
				if (termScore < 0) {
					// Floor at a small positive value to rule out the common
					// terms without letting them subtract from the score.
					termScore = 0.001;
				}
				double termFrequency = doc.containsKey(term)
						? doc.get(term) : 0;
				termScore = termScore
						* (termFrequency * (k1 + 1) / (termFrequency + k1
								* (1 - b + b * docLen / avgdl)));
				queryScore += termScore;
			}
			docs.put(docId, queryScore);
		}
		int docCount = 0;
		for (String docId : docs.keySet()) {
			if (docs.get(docId) != 0) {
				System.out.println(docId + ":" + docs.get(docId));
				docCount++;
			}
		}
		System.out.println(docCount + " documents with hits");
	}

	/**
	 * Gets a list of the docIDs of the N top ranking documents.
	 * 
	 * @param N
	 *            The number of top-ranking docs to be returned
	 * @return An array of length N; trailing entries are null when the
	 *         collection holds fewer than N documents
	 */
	public String[] topNDocs(int N) {
		String docList[] = new String[N];
		List<String> allDocs = sortByValueDesc(docs);
		for (int i = 0; i < allDocs.size() && i < N; i++) {
			docList[i] = allDocs.get(i);
		}
		return docList;
	}

	/**
	 * Extracts the concepts (the union of all words) from the top N ranked
	 * documents.
	 * 
	 * @param N
	 *            The number of top-ranking documents to be dealt with
	 * @return The set of concepts
	 */
	public Set<String> getConcepts(int N) {
		List<String> docList = sortByValueDesc(docs);
		HashSet<String> concepts = new HashSet<String>();
		// Bound by docList.size() as well: previously N larger than the
		// collection threw IndexOutOfBoundsException.
		for (int i = 0; i < N && i < docList.size(); i++) {
			concepts.addAll(collection.get(docList.get(i)).keySet());
		}
		return concepts;
	}

	/** @return The term/document-frequency vocabulary of this collection. */
	public ScoredEntities getVocabulary() {
		return vocabulary;
	}

	/**
	 * Returns the keys of the map sorted by their values in descending order.
	 * Null values sort last; non-Comparable values keep their relative order.
	 */
	public static List sortByValueDesc(final Map m) {
		List keys = new ArrayList();
		keys.addAll(m.keySet());
		Collections.sort(keys, new Comparator() {
			public int compare(Object o1, Object o2) {
				Object v1 = m.get(o1);
				Object v2 = m.get(o2);
				if (v1 == null) {
					return (v2 == null) ? 0 : 1;
				} else if (v2 == null) {
					// Fixed: previously fell through to the instanceof branch
					// and threw NPE casting a null v2 to Comparable.
					return -1;
				} else if (v1 instanceof Comparable) {
					// Arguments swapped on purpose: descending order.
					return ((Comparable) v2).compareTo(v1);
				} else {
					return 0;
				}
			}
		});
		return keys;
	}

	/** Returns the keys of the map sorted in natural ascending order. */
	public static List sortByKeysAscd(final Map m) {
		List keys = new ArrayList();
		keys.addAll(m.keySet());
		Collections.sort(keys);
		return keys;
	}

	/**
	 * Saves the collection as a sparse Weka ARFF file whose attributes are
	 * the words given in the set, plus a trailing class attribute {2,4}.
	 * 
	 * @param words
	 *            The attribute vocabulary, in iteration order
	 * @param fileLocation
	 *            The path of the ARFF file to write
	 * @param rel
	 *            docId -> class label; a missing docId writes "null"
	 */
	public void toArff(Set<String> words, String fileLocation,
			HashMap<String, String> rel) {
		PrintWriter writer = null;
		try {
			writer = new PrintWriter(fileLocation);
			// Header first
			writer.write("@relation collection" + collection.size() + "docs"
					+ words.size() + "words\n\n");

//			writer.write("@attribute postId string\n");
			for (String word : words) {
				// Avoid clashing with the reserved class attribute below.
				// Only the header name changes; data rows are positional.
				if (word.equals("class")) {
					word = "word_class";
				}
				writer.write("@attribute " + word + " numeric\n");
			}
			writer.write("@attribute class {2,4}\n");

			writer.write("\n@data\n");

			for (String docId : collection.keySet()) {
				Map<String, Integer> doc = collection.get(docId);
				int count = 0;
				// Sparse ARFF row: only attributes present in the document.
				String line = "{";
				//line = line.concat("0 '" + docId + "',");
				for (String word : words) {
					if (doc.containsKey(word)) {
						line = line.concat(count + " " + doc.get(word) + ",");
					}
					count++;
				}
				line = line.concat(count + " " + rel.get(docId));
				writer.write(line + "}\n");
			}
		} catch (FileNotFoundException e) {
			e.printStackTrace();
		} finally {
			// Ensure the file handle is released even on failure.
			if (writer != null) {
				writer.close();
			}
		}
	}

	/**
	 * @return The platform file separator character ('\\' on Windows,
	 *         '/' elsewhere).
	 */
	public static char getSeparator() {
		// Equivalent to the old os.name sniffing, via the stdlib constant.
		return File.separatorChar;
	}

}
