package my.news;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;

import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;

/**
 * Removes near-duplicate documents from a set by shingling: every document is
 * broken into all contiguous {@code wordLen}-token sequences ("shingles"), and
 * a document sharing at least one shingle hash with a previously seen document
 * is dropped from the result.
 *
 * <p>The shingle-hash map persists across calls to {@link #process(Set)}, so
 * duplicates are also detected against documents from earlier batches.
 *
 * <p>NOTE(review): detection is by {@code String.hashCode()} of the shingle, so
 * a hash collision between two different shingles can cause a false positive.
 *
 * <p>Not thread-safe: the internal map is mutated without synchronization.
 *
 * @author yura
 */
public class DeleteDublicateProcessor implements DocumentSetProcessor {

    /** Shingle hash -> first document that produced it; persists across process() calls. */
    private final HashMap<Integer, Content> localHashToContent = new HashMap<Integer, Content>();

    /** Number of consecutive tokens per shingle. */
    private final int wordLen;

//TODO: use this for better dublicate select (require N matching shingles instead of 1)
    private int shinglesToDublicateCount;

    /**
     * @param wordLen shingle length in tokens; must be positive
     * @throws IllegalArgumentException if {@code wordLen <= 0}
     */
    public DeleteDublicateProcessor(int wordLen) {
        super();
        if (wordLen <= 0) {
            throw new IllegalArgumentException("wordLen must be positive: " + wordLen);
        }
        this.wordLen = wordLen;
    }

    /**
     * Filters {@code contents}, removing every document that shares a shingle
     * with another (earlier-processed) document, as well as documents too short
     * to form a single shingle.
     *
     * @param contents documents to deduplicate (not modified)
     * @return a new set containing only the surviving documents
     */
    public Set<Content> process(Set<Content> contents) {
        Set<Content> result = new HashSet<Content>(contents);
        main: for (Content c : contents) {
            StringBuilder sb = new StringBuilder();
            String[] tokens = IndoEuropeanTokenizerFactory.FACTORY
                    .tokenizer(c.data.toCharArray(), 0, c.data.length()).tokenize();
            // A document shorter than one shingle cannot be compared; drop it.
            if (tokens.length < wordLen) {
                result.remove(c);
                continue;
            }
            for (int i = 0; i <= tokens.length - wordLen; ++i) {
                for (int j = 0; j < wordLen; ++j) {
                    sb.append(tokens[i + j]);
                    sb.append(" ");
                }
                int localHash = sb.toString().hashCode();
                sb.setLength(0); // reset unconditionally, including on the duplicate path
                Content previous = localHashToContent.get(localHash);
                if (previous != null && previous != c) {
                    // Shingle already seen in a DIFFERENT document: c is a duplicate.
                    result.remove(c);
                    continue main;
                }
                // BUGFIX: the old code used containsKey() only, so a document that
                // repeated the same shingle internally collided with its own earlier
                // entry and was removed as a duplicate of itself.
                localHashToContent.put(localHash, c);
            }
        }
        return result;
    }

}
