/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package plagiadetector.detector;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.logging.Level;
import java.util.logging.Logger;
import plagiadetector.datatype.GramCounter;
import plagiadetector.datatype.TextSegment;
import weka.core.tokenizers.NGramTokenizer;


/**
 * Implementation of Oberreuter et al.'s system.
 * Modification of OberDetector.java: the last window of the segmentation
 * is discarded if its size is less than half of the window size.
 * @author Dell
 */
public class OberDetectorExp2 extends Detector {

    private int ngram;            // character n-gram size
    private int windowsize;       // size of a window (number of words)
    private float plagthreshold;  // plagiarism threshold
    private float docstyle;       // style value of the whole document
    private NGramTokenizer NGT;   // Weka tokenizer, used for extracting n-grams

    private boolean isPassed;     // when true, doStylometricQA() skips this document
                                  // (intended for documents below mindocwords)
    private int mindocwords;      // minimum word count for processing (intended default: 400)
                                  // NOTE(review): neither isPassed nor mindocwords is assigned
                                  // anywhere in this class — presumably set by the Detector
                                  // superclass or a caller; confirm before relying on them.

    public ArrayList<String> docgrams; // n-grams from the whole text
    public HashMap<String, GramCounter> docgramfrequency; // n-gram frequency of the whole document

    public ArrayList<ArrayList<String>> windowgrams; // n-grams from each sliding window
    public ArrayList<HashMap<String, GramCounter>> windowgramfrequency; // n-gram frequency of each window

    public ArrayList<Float> windowstyle; // style value of each window


    /**
     * Normalizes the raw text and stores it for segmentation.
     */
    @Override
    public void doPreprocessing() {
        this.preprocessedText = preprocessText(getRawText());
    }

    /**
     * Normalizes text: lowercases it and replaces every character outside
     * a-z with a single space. The text length — and therefore every
     * character offset — is preserved, which doSegmentation() relies on
     * when recording window offsets.
     *
     * BUGFIX: the original pattern "[^a-z\\s+]" treated '+' as a literal
     * (a '+' inside a character class is not a quantifier), so plus signs
     * survived, and "\\s" kept tabs/newlines which doSegmentation() — which
     * tests only for ' ' — would then count as word characters. Replacing
     * every non-letter with ' ' fixes both cases.
     *
     * @param s raw input text.
     * @return normalized text of the same length as the input.
     */
    @Override
    public String preprocessText(String s) {
        return s.toLowerCase().replaceAll("[^a-z]", " ");
    }

    /**
     * Splits the preprocessed text into windows of windowsize words each.
     * Each window is stored as a text segment together with its character
     * offset into the preprocessed text. The trailing partial window is
     * kept only when it holds at least half of windowsize words; otherwise
     * it is discarded (this is the difference from OberDetector).
     */
    @Override
    public void doSegmentation() {

        final String text = getPreprocessedText();
        StringBuilder textbuff = new StringBuilder();
        int textoffset = 0;        // offset of the window currently being buffered
        int wordcount = 0;         // words accumulated in the current window
        boolean buffering = false; // true while scanning the inside of a word

        for (int i = 0; i < text.length(); i++) {
            char c = text.charAt(i);

            if (c != ' ') {
                // First character of a new word starts the buffering phase.
                if (!buffering) {
                    buffering = true;
                    wordcount++;
                }
            } else if (buffering) {
                // A space right after a word: the word just ended.
                if (wordcount == windowsize) {
                    // Window is full — store it and start the next one here.
                    addTextSegment(textbuff.toString(), textoffset);
                    wordcount = 0;
                    textbuff.setLength(0);
                    textoffset = i; // next window begins at this space
                }
                buffering = false;
            }

            // Every character (word or space) belongs to the current window.
            textbuff.append(c);
        }

        // Keep the trailing partial window only when it is big enough.
        if (wordcount >= (windowsize / 2)) {
            addTextSegment(textbuff.toString(), textoffset);
        }
    }

    /**
     * Stylometric extraction:
     * 1. extracts the n-grams of each window into windowgrams, and
     * 2. accumulates all of them into docgrams (the whole-document stream).
     */
    @Override
    public void doStylometricExtraction() {

        int numtextsegments = getNumOfTextSegments();
        for (int i = 0; i < numtextsegments; i++) {
            ArrayList<String> grams = extractNGrams(getTextSegment(i));
            windowgrams.add(grams);  // keep the per-window list itself
            docgrams.addAll(grams);  // merge its contents into the document stream
        }
    }

    /**
     * Stylometric Quantification:
     * builds the (non-normalized) n-gram frequency vectors of the document
     * and of each window, then the style value of each window and of the
     * whole document.
     *
     * Stylometric Analysis:
     * a window whose style falls below (docstyle - plagthreshold) is marked
     * as a plagiarism case. Adjacent marked windows are merged into a single
     * case covering their combined text length, reported at the offset of
     * the first window of the run.
     *
     * Does nothing when isPassed is set.
     */
    @Override
    public void doStylometricQA() {

        if (isPassed) {
            return; // document was flagged to be skipped
        }

        // Quantification
        buildDocNGramFrequency();
        buildWindowNGramFrequency();
        buildWindowStyle();
        buildDocStyle();

        // Analysis: join neighbouring suspicious windows into one case.
        int runstart = 0;      // index of the first window of the current run
        int runlength = 0;     // combined text length of the current run
        boolean inRun = false; // true while extending a run of marked windows
        final float limit = getDocstyle() - getPlagthreshold();
        final int numwindows = windowstyle.size();

        for (int i = 0; i < numwindows; i++) {
            if (windowstyle.get(i) < limit) {
                if (!inRun) {
                    // first suspicious window of a new run
                    runstart = i;
                    inRun = true;
                    runlength = getTextSegment(i).segmentText.length();
                } else {
                    // extend the current run
                    runlength += getTextSegment(i).segmentText.length();
                }
            } else if (inRun) {
                // the run ended one window ago — report it
                addCase(getTextSegment(runstart).offset, runlength);
                inRun = false;
            }
        }

        if (inRun) {
            // the text ended while still inside a run of marked windows
            addCase(getTextSegment(runstart).offset, runlength);
        }
    }

    /**
     * Constructs an OberDetectorExp2 with default parameters and no text.
     */
    public OberDetectorExp2() {
        super();
        initialize();
        resetParameters();
    }

    /**
     * Constructs an OberDetectorExp2 with default parameters.
     * @param s raw text for this object.
     */
    public OberDetectorExp2(String s) {
        super();
        setRawText(s);
        initialize();
        resetParameters();
    }

    /**
     * (Re)creates or clears all working collections and resets the document
     * style so the detector can be reused on a new document.
     */
    @Override
    public void initialize() {
        super.initialize();

        if (NGT == null) {
            NGT = new NGramTokenizer();
        }

        docstyle = 0; // unconditional reset (equivalent to the old guarded one)

        if (docgrams == null) {
            docgrams = new ArrayList<String>();
        } else {
            docgrams.clear();
        }

        if (docgramfrequency == null) {
            docgramfrequency = new HashMap<String, GramCounter>();
        } else {
            docgramfrequency.clear();
        }

        if (windowgrams == null) {
            windowgrams = new ArrayList<ArrayList<String>>();
        } else {
            windowgrams.clear();
        }

        if (windowgramfrequency == null) {
            windowgramfrequency = new ArrayList<HashMap<String, GramCounter>>();
        } else {
            windowgramfrequency.clear();
        }

        if (windowstyle == null) {
            windowstyle = new ArrayList<Float>();
        } else {
            windowstyle.clear();
        }
    }

    /**
     * Sets the character n-gram size of this object and reconfigures the
     * tokenizer so it produces n-grams of exactly that size.
     * @param n preferred n-gram size.
     */
    public void setNGramSize(int n) {
        ngram = n;
        try {
            NGT.setOptions(new String[]{"-min", "" + ngram, "-max", "" + ngram});
        } catch (Exception ex) {
            Logger.getLogger(OberDetectorExp2.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    /**
     * Gets the character n-gram size of this object.
     * @return this object's n-gram size.
     */
    public int getNGramSize() {
        return ngram;
    }

    /**
     * Gets the number of sliding windows used to process the document.
     *
     * NOTE(review): this divides the segment count by the window size, but
     * doSegmentation() already produces exactly one segment per window, so
     * the window count would then simply be getNumOfTextSegments(). Kept
     * as-is — verify against callers before changing.
     *
     * @return number of sliding windows.
     */
    public int getNumOfWindows() {
        int result = getNumOfTextSegments() / getWindowSize();
        if (getNumOfTextSegments() % getWindowSize() > 0) {
            result++;
        }
        return result;
    }

    /**
     * Gets the number of distinct n-grams found in this document.
     * @return number of terms.
     */
    public int getNumOfTerms() {
        return docgramfrequency.size();
    }

    /**
     * @return the windowsize
     */
    public int getWindowSize() {
        return windowsize;
    }

    /**
     * @param windowsize the windowsize to set
     */
    public void setWindowSize(int windowsize) {
        this.windowsize = windowsize;
    }

    /**
     * @return the plagthreshold
     */
    public float getPlagthreshold() {
        return plagthreshold;
    }

    /**
     * @param plagthreshold the plagthreshold to set
     */
    public void setPlagthreshold(float plagthreshold) {
        this.plagthreshold = plagthreshold;
    }

    /**
     * @return the docstyle
     */
    public float getDocstyle() {
        return docstyle;
    }

    /**
     * @param docstyle the docstyle to set
     */
    public void setDocstyle(float docstyle) {
        this.docstyle = docstyle;
    }

    /**
     * Extracts n-grams from the given text using the configured tokenizer.
     * @param text input text.
     * @return ArrayList of the n-grams.
     */
    public ArrayList<String> extractNGrams(String text) {
        ArrayList<String> ngramlist = new ArrayList<String>();

        NGT.tokenize(text);
        while (NGT.hasMoreElements()) {
            ngramlist.add((String) NGT.nextElement());
        }

        return ngramlist;
    }

    /**
     * Extracts n-grams from the given text segment.
     * @param textseg TextSegment object.
     * @return ArrayList of the n-grams.
     */
    public ArrayList<String> extractNGrams(TextSegment textseg) {
        return extractNGrams(textseg.segmentText);
    }

    /**
     * Counts the occurrences of each unique n-gram in the given list.
     * @param als list of n-grams (with repetitions).
     * @return map from n-gram to its GramCounter (frequency).
     */
    public HashMap<String, GramCounter> extractNGramFrequency(ArrayList<String> als) {
        HashMap<String, GramCounter> agc = new HashMap<String, GramCounter>();

        for (String gram : als) {
            GramCounter gc = agc.get(gram);
            if (gc == null) {
                // first occurrence of this n-gram
                agc.put(gram, new GramCounter(gram, 1f));
            } else {
                // already seen: bump the counter we just fetched
                // (the original re-fetched and cast it a second time)
                gc.occurrence++;
            }
        }

        return agc;
    }

    /**
     * Builds the n-gram frequency map for the whole document.
     */
    public void buildDocNGramFrequency() {
        docgramfrequency = extractNGramFrequency(docgrams);
    }

    /**
     * Builds the style value of each window: for every n-gram of the window,
     * accumulate |doc - win| / |doc + win| of its occurrence counts against
     * the whole-document frequency map.
     */
    public void buildWindowStyle() {

        for (HashMap<String, GramCounter> windowfreq : windowgramfrequency) {
            // style accumulator for this window
            float wsbuff = 0f;

            for (Entry<String, GramCounter> e : windowfreq.entrySet()) {
                // look up the same n-gram in the whole-document map
                GramCounter docgc = docgramfrequency.get(e.getKey());

                if (docgc == null) {
                    // every window n-gram was merged into docgrams, so this
                    // should not happen; kept as a diagnostic
                    System.out.println("no such word : " + e.getKey());
                } else {
                    float winocc = e.getValue().occurrence;
                    wsbuff += Math.abs(docgc.occurrence - winocc)
                            / Math.abs(docgc.occurrence + winocc);
                }
            }

            windowstyle.add(wsbuff);
        }
    }

    /**
     * Builds the style of the whole document as the mean of the window
     * styles. Leaves docstyle at 0 when there are no windows (the original
     * divided by zero and produced NaN in that case).
     */
    public void buildDocStyle() {
        docstyle = 0f;
        for (Float f : windowstyle) {
            docstyle += f;
        }

        if (!windowstyle.isEmpty()) {
            docstyle /= (float) windowstyle.size();
        }
    }

    /**
     * Builds the n-gram frequency map of each window.
     */
    public void buildWindowNGramFrequency() {
        for (ArrayList<String> als : windowgrams) {
            windowgramfrequency.add(extractNGramFrequency(als));
        }
    }

    /**
     * Restores the default parameters: 1-grams, 400-word windows and a
     * plagiarism threshold of 0.075.
     */
    @Override
    public void resetParameters() {

        if (ngram != 1) {
            setNGramSize(1); // also updates the tokenizer options
        }

        windowsize = 400;
        plagthreshold = 0.075f;
    }
}
