
package textmining.gpanalysis;

import data.Post;
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.SortedSet;
import org.apache.commons.collections.Bag;
import org.apache.commons.collections.bag.HashBag;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WordlistLoader;
import org.apache.lucene.analysis.snowball.SnowballAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.util.Version;
import org.apache.solr.analysis.SynonymFilter;
import org.apache.solr.analysis.SynonymMap;

/**
 * generate keyword vector of a certain post used for similarity calculation
 * @author ibrahimsabek
 */

public class KeywordVectorGenerator {
    protected Post post;
    /** Snowball stemmer language passed to {@link SnowballAnalyzer}. */
    protected static final String stemmerType = "English";
    protected Analyzer stemAnalyzer;
    // NOTE(review): raw type kept so the protected field stays source-compatible
    // with existing subclasses; Lucene's WordlistLoader.getWordSet fills it
    // with String stop words.
    protected HashSet stopWordsSet;
    // Tokens captured by getSynonymMap() and replayed by refineParaTokens();
    // the two methods share state through this field.
    private List<Token> rawTokens;

    protected Reader[] PIReaders;  // readers for first paragraph, last paragraph and tags
    protected Reader[] remReaders; // readers for the remaining (middle) paragraphs
    protected Bag PITokenBag;      // first paragraph, last paragraph and tags token bags
    protected Bag remPITokenBag;   // all paragraphs word bags, except first, last paragraph and tags;
                                   // stays null when the post has <= 2 paragraphs

    private WordNetSynLoader wordNetSynLoader;
    HashMap<String, Double> keywordVector;

    /**
     * Builds the token bags for the given post, which must already have its
     * paragraph readers populated (see {@code Post#getParaReaders()}).
     *
     * @param post the post whose keyword vector will be generated
     * @throws IOException if the stop-word list cannot be read or tokenization fails
     */
    public KeywordVectorGenerator(Post post) throws IOException {
        this.post = post;
        init(); // initial analyzer / stop-word / WordNet configuration for this post

        // Hoist the invariant getter call: the array is reused many times below.
        Reader[] paraReaders = post.getParaReaders();

        if (paraReaders.length <= 2) {
            // Too few paragraphs to split: treat everything as "PI" content.
            // remReaders / remPITokenBag intentionally stay null; consumers such
            // as getTokenPostFrequency() test the same length condition.
            PIReaders = Arrays.copyOf(paraReaders, paraReaders.length);
            this.PITokenBag = createTokenBag(PIReaders);
        } else {
            // PI = first paragraph + last paragraph (tags).
            PIReaders = new Reader[]{paraReaders[0], paraReaders[paraReaders.length - 1]};
            this.PITokenBag = createTokenBag(PIReaders);

            // Everything in between goes into the "remaining" bag.
            remReaders = new Reader[paraReaders.length - 2];
            for (int i = 1; i < paraReaders.length - 1; i++) {
                remReaders[i - 1] = paraReaders[i];
            }
            this.remPITokenBag = createTokenBag(remReaders);
        }
    }

    /**
     * Creates the analyzer, stop-word set and WordNet synonym loader used by
     * the tokenization methods.
     *
     * @throws IOException if {@code reservedWords/StopWordsList.txt} cannot be read
     */
    protected void init() throws IOException {
        stemAnalyzer = new SnowballAnalyzer(Version.LUCENE_29, stemmerType);
        stopWordsSet = WordlistLoader.getWordSet(new File("reservedWords/StopWordsList.txt"));
        wordNetSynLoader = new WordNetSynLoader();
    }

    /** Stub intended for subclass override; always returns {@code null} here. */
    public Object generateKeywordsContainer() { return null; }

    /**
     * Stub intended for subclass override: builds the word set used in the
     * keyword vector. Always returns {@code null} here.
     */
    protected SortedSet<String> createWordSet(Post[] post) { return null; }

    /**
     * Frequency of {@code token} within the given bag.
     *
     * @return the token's count, or 0 if the token is absent
     */
    protected int getTokenFrequency(String token, Bag tokenBag) {
        // Bag.getCount already returns 0 for unknown tokens; the clamp simply
        // preserves the original "never negative" contract without branching.
        return Math.max(0, tokenBag.getCount(token));
    }

    /**
     * Frequency of {@code token} across the whole post: the PI bag alone for
     * short posts, or PI + remaining bags otherwise (mirrors the constructor's
     * length condition).
     */
    protected int getTokenPostFrequency(String token) {
        if (post.getParaReaders().length <= 2) {
            return getTokenFrequency(token, PITokenBag);
        } else {
            return getTokenFrequency(token, PITokenBag) + getTokenFrequency(token, remPITokenBag);
        }
    }

    /**
     * Tokenizes every paragraph reader, lower-cases each token and counts
     * occurrences in a {@link HashBag}.
     *
     * @param paraReaders paragraph readers to tokenize
     * @return a bag mapping each lower-cased token to its frequency
     * @throws IOException if tokenization fails
     */
    protected Bag createTokenBag(Reader[] paraReaders) throws IOException {
        Bag tokenBag = new HashBag();

        // Loop over all required paragraphs.
        for (Reader paraReader : paraReaders) {
            ArrayList<String> tokens = refineParaTokens(paraReader);

            // Bag semantics: first add inserts, later adds increment the count.
            for (String token : tokens) {
                tokenBag.add(StringUtils.lowerCase(token));
            }
        }

        return tokenBag;
    }

    /**
     * Stems and removes stop words from a paragraph's tokens using the
     * Snowball analyzer. (Currently unused by createTokenBag, which calls
     * refineParaTokens instead.)
     *
     * @param paraReader paragraph text; assumed to be a StringReader — the cast
     *                   below will fail for other Reader implementations
     * @return the stemmed, stop-word-free tokens in order
     * @throws IOException if the token stream fails
     */
    protected ArrayList<String> stemParaTokens(Reader paraReader) throws IOException {
        TokenStream tStream = stemAnalyzer.tokenStream("snowballStemField", (StringReader) paraReader);
        StopFilter stopWordsFilter = new StopFilter(true, tStream, stopWordsSet);

        ArrayList<String> refinedTokens = new ArrayList<String>();
        Token t = new Token();

        // Legacy Lucene 2.x reusable-token iteration.
        while ((t = stopWordsFilter.next(t)) != null) {
            refinedTokens.add(t.term());
        }

        return refinedTokens;
    }

    /**
     * Tokenizes a paragraph with StandardAnalyzer (stop words removed), then
     * expands the tokens with WordNet synonyms via Solr's SynonymFilter.
     *
     * @param paraReader paragraph text; assumed to be a StringReader — the cast
     *                   below will fail for other Reader implementations
     * @return tokens (plus injected synonyms) in stream order
     * @throws IOException if tokenization fails
     */
    protected ArrayList<String> refineParaTokens(Reader paraReader) throws IOException {
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_29, stopWordsSet);
        TokenStream tStream = analyzer.tokenStream("standardTokenizeField", (StringReader) paraReader);

        // Side effect: getSynonymMap consumes tStream and fills this.rawTokens.
        SynonymMap synMap = getSynonymMap(tStream);

        // Replay the captured tokens as a stream for the synonym filter,
        // since tStream has already been exhausted above.
        TokenStream ts = new TokenStream() {
            Iterator<Token> iter = rawTokens.iterator();

            @Override
            public Token next() throws IOException {
                return iter.hasNext() ? iter.next() : null;
            }
        };

        SynonymFilter synFilter = new SynonymFilter(ts, synMap);

        ArrayList<String> refinedTokens = new ArrayList<String>();
        Token t = new Token();

        while ((t = synFilter.next(t)) != null) {
            refinedTokens.add(t.term());
        }
        return refinedTokens;
    }

    /**
     * Consumes the token stream, recording every token into {@link #rawTokens}
     * and registering each token's WordNet synonyms in a {@link SynonymMap}.
     *
     * @param tStream token stream to consume (fully exhausted on return)
     * @return the populated synonym map
     * @throws IOException if the stream fails
     */
    private SynonymMap getSynonymMap(TokenStream tStream) throws IOException {
        SynonymMap synMap = new SynonymMap(true); // true = ignore case

        rawTokens = new ArrayList<Token>();

        Token t = new Token();
        while ((t = tStream.next(t)) != null) {
            // Copy the term out of the reusable token before storing it.
            Token newToken = new Token();
            newToken.setTermText(t.term());
            rawTokens.add(newToken);

            String tmp = t.term();
            List<String> tmpSyn = wordNetSynLoader.loadSynonymsList(tmp);
            if (tmpSyn != null) {
                for (String currSyn : tmpSyn) {
                    Token tmpToken = new Token();
                    tmpToken.setTermText(currSyn);

                    List<Token> tmpTokenList = new ArrayList<Token>();
                    tmpTokenList.add(tmpToken);

                    // orig=false (don't keep the source token for this rule),
                    // merge=true (merge with existing entries for the same key).
                    synMap.add(Arrays.asList(tmp.split(" ")), tmpTokenList, false, true);
                }
            }
        }

        // NOTE(review): hard-coded "fallen down" mapping looks like leftover
        // debug/test code — confirm whether it should be removed.
        Token tok = new Token();
        tok.setTermText("fallen down");
        List<Token> tmp1 = new ArrayList<Token>();
        tmp1.add(tok);
        synMap.add(Arrays.asList("fallen down".split(" ")), tmp1, false, true);

        return synMap;
    }

    /**
     * Reads all text from the reader and closes it.
     *
     * @param reader source of characters; closed even if reading fails
     * @return the full text
     * @throws Exception if reading fails
     */
    protected String getText(Reader reader) throws Exception {
        StringBuilder textBuilder = new StringBuilder();
        char[] charBuf = new char[1024];
        try {
            int len;
            while ((len = reader.read(charBuf, 0, charBuf.length)) != -1) {
                // Append the chunk directly — no intermediate copy via
                // ArrayUtils.subarray is needed.
                textBuilder.append(charBuf, 0, len);
            }
        } finally {
            // Close on all paths; the original leaked the reader on error.
            reader.close();
        }
        return textBuilder.toString();
    }

    /** Stub intended for subclass override; always returns {@code null} here. */
    public HashMap<String, Double> getMaxVector() {
        return null;
    }

    /** Stub intended for subclass override; always returns {@code null} here. */
    public HashMap<String, Double> getKMaxVector() {
        return null;
    }

    /** @return the token bag for first/last paragraphs and tags */
    public Bag getPITokenBag() {
        return PITokenBag;
    }

    /** @return the token bag for middle paragraphs, or null for short posts */
    public Bag getRemPITokenBag() {
        return remPITokenBag;
    }

    /** Stub intended for subclass override; currently a no-op. */
    public void setPosts(Post[] posts) {
    }
}
