package de.recipeminer.tools;

import com.aliasi.dict.ExactDictionaryChunker;
import com.aliasi.dict.MapDictionary;
import com.aliasi.dict.TrieDictionary;
import com.aliasi.chunk.Chunk;
import com.aliasi.chunk.Chunker;
import com.aliasi.chunk.Chunking;

import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.TokenizerFactory;

import de.recipeminer.training.NER_Training;

import java.util.Iterator;
import java.util.Set;

import com.aliasi.dict.ApproxDictionaryChunker;
import com.aliasi.dict.DictionaryEntry;
import com.aliasi.dict.TrieDictionary;

import com.aliasi.spell.FixedWeightEditDistance;
import com.aliasi.spell.WeightedEditDistance;


/**
 * Refines raw recipe text: strips stop words and normalizes ingredient names
 * against the ingredient dictionary.
 *
 * @author Georg Mühlenberg
 * @version 0.2 (25 Jan 2012)
 */

public class RecipeRefinery {

    // Stop words to be removed from recipe text.
    static final MapDictionary<String> DICT_STOPWORD = NER_Training.generateStopWordDictionary();
    // Activity-feature demo dictionary (not used by this class yet).
    static final MapDictionary<String> DICT_ACTIVITY_FEAT = NER_Training.generateSimpleDemoDictionary();
    // Known ingredient names; trie form is required by ApproxDictionaryChunker.
    static final TrieDictionary<String> DICT_INGREDIENT = NER_Training.generateIngredientDictionary();

    /**
     * Preprocesses a recipe text: first removes all stop words, then
     * normalizes ingredient names to their dictionary form.
     *
     * @param s raw recipe text
     * @return preprocessed text
     */
    public String vorverarbeiteText(String s) {
        s = loescheWorteAusString(s, Stoppwortsuche(s));
        s = vereinheitlicheZutatennamen(s);
        return s;
    }

    /**
     * Finds approximate matches of ingredient names in {@code s} and prints
     * a report of matched phrase, dictionary entry and edit distance.
     *
     * <p>The actual replacement of synonym candidates by their best dictionary
     * match is not implemented yet (see TODO); until then the input is
     * returned unchanged so the preprocessing pipeline does not lose text.
     *
     * @param s recipe text
     * @return the (currently unmodified) text
     */
    public String vereinheitlicheZutatennamen(String s) {
        TokenizerFactory tokenizerFactory
                = IndoEuropeanTokenizerFactory.INSTANCE;

        // Substitutions/insertions/deletions cost 1 each; transposition is
        // disallowed (NaN weight); exact matches cost 0.
        WeightedEditDistance editDistance
                = new FixedWeightEditDistance(0, -1, -1, -1, Double.NaN);

        // Maximum accumulated edit distance for a phrase to count as a match.
        double maxDistance = 10.0;

        ApproxDictionaryChunker chunker
                = new ApproxDictionaryChunker(DICT_INGREDIENT, tokenizerFactory,
                        editDistance, maxDistance);

        System.out.println("\n\n " + s + "\n");
        Chunking chunking = chunker.chunk(s);
        CharSequence cs = chunking.charSequence();
        Set<Chunk> chunkSet = chunking.chunkSet();

        System.out.printf("%15s  %15s   %8s\n",
                "Matched Phrase",
                "Dict Entry",
                "Distance");
        for (Chunk chunk : chunkSet) {
            int start = chunk.start();
            int end = chunk.end();
            CharSequence str = cs.subSequence(start, end);
            double distance = chunk.score();
            String match = chunk.type();
            System.out.printf("%15s  %15s   %8.1f\n",
                    str, match, distance);
        }

        // TODO: replace all synonym candidates with their best matches from
        // the ingredient dictionary. Until then, return the input unchanged
        // (previously "" was returned, which wiped out the whole text).
        return s;
    }

    /**
     * Locates all stop words in {@code s} using exact dictionary matching.
     *
     * @param s text to scan
     * @return the set of stop-word chunks (character offsets into {@code s})
     */
    public Set<Chunk> Stoppwortsuche(String s) {
        // returnAllMatches=true, caseSensitive=true
        ExactDictionaryChunker StopWortSucherTT
                = new ExactDictionaryChunker(DICT_STOPWORD,
                        IndoEuropeanTokenizerFactory.INSTANCE,
                        true, true);

        Chunking chunking = StopWortSucherTT.chunk(s);
        return chunking.chunkSet();
    }

    /**
     * Removes every character span covered by a chunk from {@code s}.
     *
     * <p>Fixes the previous implementation, which cast the {@code Iterator}
     * itself to {@code Chunk} (a guaranteed {@code ClassCastException}) and
     * never modified the string. A coverage mask is used so overlapping
     * chunks (possible with returnAllMatches=true) are handled safely.
     *
     * @param s  source text
     * @param cs chunks whose spans should be deleted
     * @return {@code s} with all chunk spans removed
     */
    public String loescheWorteAusString(String s, Set<Chunk> cs) {
        // Mark every character position covered by at least one chunk.
        boolean[] covered = new boolean[s.length()];
        for (Chunk chunk : cs) {
            for (int i = chunk.start(); i < chunk.end(); i++) {
                covered[i] = true;
            }
        }
        // Keep only the uncovered characters.
        StringBuilder result = new StringBuilder(s.length());
        for (int i = 0; i < s.length(); i++) {
            if (!covered[i]) {
                result.append(s.charAt(i));
            }
        }
        return result.toString();
    }

}
