package net.kem.utils.duplicatesfinder;

import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

/**
 * Base class for duplicate detection. Subclasses populate {@link #_duplicateCandidates}
 * with groups of "duplicate-suspected" objects keyed by some cheap discriminating
 * property (e.g. file size); {@link #filterDuplicateCandidates()} then narrows each
 * group down to the objects that really are duplicates of each other, as decided by
 * {@code DuplicateCandidate#isDuplicate}.
 *
 * @param <K> type of the grouping key (for files this may be the file size)
 * @param <T> type of the duplicate candidates
 */
public abstract class BaseDuplicateFinder<K, T extends DuplicateCandidate<K, T>> {
	/**
	 * Contains map of objects that may be duplicated (duplicate candidates).
	 * The map key should be the object property that makes the object "duplicate-suspected"
	 * (for example, for files such a key may be file size: several files with the same size may (but not must) be duplicated).
	 */
	protected Map<K, List<T>> _duplicateCandidates;

	protected BaseDuplicateFinder() {
		_duplicateCandidates = new HashMap<K, List<T>>();
	}

	/**
	 * Narrows every candidate group down to confirmed duplicates.
	 * After this call, each remaining list in {@link #_duplicateCandidates} holds only
	 * elements confirmed as duplicates of at least one other element in the same list;
	 * groups that end up with fewer than two confirmed duplicates are removed entirely.
	 */
	public void filterDuplicateCandidates() {
		// Walk through map entries to get rid of non-relevant duplicate candidates.
		for(Iterator<Map.Entry<K, List<T>>> iter = _duplicateCandidates.entrySet().iterator(); iter.hasNext();) {
			Map.Entry<K, List<T>> me = iter.next();
			// Get list of duplicate candidates.
			List<T> duplicateCandidates = me.getValue();
			// A single-element group cannot contain duplicates; fall through to removal below.
			if(duplicateCandidates.size() > 1) {
				LinkedList<T> duplicated = new LinkedList<T>();
				while(!duplicateCandidates.isEmpty()) {
					// Use the first remaining item as the probe.
					T firstDuplicateCandidate = duplicateCandidates.get(0);
					// Snapshot the collected count so we can tell whether THIS probe matched
					// anything. (Bug fix: the previous check `duplicated.size() > 0` tested
					// whether ANY earlier probe had matched, so once one duplicate pair was
					// found, every later non-duplicate candidate was wrongly kept too.)
					int alreadyCollected = duplicated.size();
					// Compare the probe with every other remaining candidate; matches are
					// moved into 'duplicated' and taken out of the working list.
					for(Iterator<T> iterator = duplicateCandidates.iterator(); iterator.hasNext();) {
						T aCandidate = iterator.next();
						if(firstDuplicateCandidate != aCandidate && firstDuplicateCandidate.isDuplicate(aCandidate)) {
							duplicated.add(aCandidate);
							iterator.remove();
						}
					}
					// Keep the probe itself only if it matched at least one other candidate
					// during this pass.
					if(duplicated.size() > alreadyCollected) {
						duplicated.add(firstDuplicateCandidate);
					}
					// The probe has been compared against everything remaining; drop it.
					duplicateCandidates.remove(0);
				}
				// Only non-duplicated items were discarded above. Replace the group's
				// contents with the confirmed duplicates.
				duplicateCandidates.clear();
				duplicateCandidates.addAll(duplicated);
			}

			// Groups without at least two confirmed duplicates carry no information.
			if(duplicateCandidates.size() < 2) {
				iter.remove();
			}
		}
	}

	/**
	 * Prints every remaining duplicate group to standard output, one separator line
	 * between groups.
	 * NOTE(review): this method is destructive — each printed element is removed from
	 * its group, leaving the map's lists empty afterwards. Behavior is kept as-is for
	 * compatibility with existing callers; confirm this side effect is intended.
	 */
	public void report() {
		for(List<T> sameHashFiles: _duplicateCandidates.values()) {
			System.out.println("----------------------------------------------------");
			for(Iterator<T> iterator = sameHashFiles.iterator(); iterator.hasNext();) {
				T fileInfo = iterator.next();
				System.out.println(fileInfo);
				iterator.remove();
			}
		}
	}

	/**
	 * Logs phase transitions with timing. Prints how long the previous phase took
	 * (only when {@code prevTimeStamp > 0}, i.e. not on the first call) followed by a
	 * "Start ..." line for the phase that is about to begin.
	 *
	 * @param log           description of the phase that is starting
	 * @param prevTimeStamp the value returned by the previous call, or {@code <= 0}
	 *                      for the very first phase
	 * @return the current time in milliseconds, to pass into the next call
	 */
	public static long printLog(CharSequence log, long prevTimeStamp) {
		long start = System.currentTimeMillis();
		if(prevTimeStamp > 0L) {
			System.out.println("Done. It took " + (start-prevTimeStamp) + " milliseconds.");
		}
		System.out.println("Start " + log);
		return start;
	}

	/**
	 * @return the internal candidate map itself (not a copy); mutations by the caller
	 *         are visible to this finder
	 */
	public Map<K, List<T>> getSameSize() {
		return _duplicateCandidates;
	}
}