import java.io.File;
import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Scanner;
import java.util.Set;
import java.util.TreeSet;


/*Copyright  Feliks Rozenberg

                  Licensed under the Apache License, Version 2.0 (the "License");
                  you may not use this file except in compliance with the License.
                  You may obtain a copy of the License at

                      http://www.apache.org/licenses/LICENSE-2.0

                  Unless required by applicable law or agreed to in writing, software
                  distributed under the License is distributed on an "AS IS" BASIS,
                  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
                  See the License for the specific language governing permissions and
                  limitations under the License.*/


/**
 * Word-sense discrimination experiment: reads a tab-separated, one-token-per-line
 * corpus ("Europarl-1M-en.txt", fields: token, POS tag, lemma), collects a sliding
 * window of content words around every occurrence of the ambiguous lemmas
 * "market" and "country", trains Naive-Bayes sense probabilities with
 * Witten-Bell-style smoothing on the first 90% of contexts, and reports
 * classification accuracy on the remaining 10%.
 */
public class WordSenseProgram

{
  /**
   * Entry point. Opens the corpus, extracts contexts, estimates probabilities
   * and prints the test accuracy. Exits with status 1 if the corpus file is
   * missing.
   */
  public static void main(String[] args)
  {
    String fileName = "Europarl-1M-en.txt";
    Scanner input = null;
    try
    {
      input = new Scanner(new File(fileName), "UTF-8");
    }
    catch (FileNotFoundException e)
    {
      System.out.println("Could not open the file " + fileName);
      // Non-zero exit so scripts can detect the failure
      // (previously exited with 0, i.e. "success").
      System.exit(1);
    }
    // Sliding window over the token stream, pre-filled with 101 null slots.
    // NOTE(review): because of the pre-fill, the window grows to 202 entries
    // before it starts sliding, while the pivot is always read from index 50 —
    // so the pivot is NOT the true middle of the steady-state window. The
    // original code behaved the same way; confirm the intended window size.
    LinkedList<Word> wordsInContext = new LinkedList<Word>();
    for (int i = 0; i < 101; i++)
      wordsInContext.add(null);
    ArrayList<LinkedList<Word>> contextsByMiddleWord = new ArrayList<LinkedList<Word>>();
    int counter = 0;
    boolean windowFilled = false;
    System.out.println("The program is extracting word pairs, please wait!");
    // Slide the window over the corpus, snapshotting it whenever the pivot
    // slot holds one of the two target lemmas.
    while (input.hasNextLine())
    {
      if (windowFilled)
        wordsInContext.remove(0); // drop the oldest entry once the window is full
      String[] lemmaAndPOS = input.nextLine().split("\\t");
      // Keep content words (nouns NN*, adjectives JJ*, verbs VB*). Everything
      // else — including malformed lines with fewer than three fields, which
      // previously crashed with ArrayIndexOutOfBoundsException — occupies a
      // null slot so the window keeps its alignment with the token stream.
      if (lemmaAndPOS.length >= 3
          && (lemmaAndPOS[1].startsWith("NN") || lemmaAndPOS[1].startsWith("JJ")
              || lemmaAndPOS[1].startsWith("VB")))
        wordsInContext.add(Word.newWord(lemmaAndPOS[2], lemmaAndPOS[1]));
      else
        wordsInContext.add(null);
      if (!windowFilled && ++counter == 101)
        windowFilled = true;
      Word pivot = wordsInContext.get(50);
      if (pivot != null
          && (pivot.getLemma().equals("market") || pivot.getLemma().equals("country")))
      {
        // Snapshot the current window (copy constructor instead of the
        // original's unchecked clone() cast — same shallow copy).
        contextsByMiddleWord.add(new LinkedList<Word>(wordsInContext));
      }
    }
    input.close(); // fixes the Scanner resource leak

    int countCountry = 0;
    int countMarket = 0;
    System.out.println("Number of contexts: " + contextsByMiddleWord.size());
    double sumOfWordsInAllContext = 0;
    // Count the contexts per sense and the content words over all contexts.
    // Every stored context has a "market" or "country" pivot by construction,
    // so the word total can be accumulated unconditionally.
    for (LinkedList<Word> context : contextsByMiddleWord)
    {
      String pivotLemma = context.get(50).getLemma();
      if (pivotLemma.equals("country"))
        countCountry++;
      else if (pivotLemma.equals("market"))
        countMarket++;
      sumOfWordsInAllContext += countContentWords(context);
    }
    System.out.println("number of country contexts: " + countCountry + 
                       " number of market contexts: " + countMarket);

    // 90/10 split of the extracted contexts into training and testing.
    int trainingSize = (int) (0.9 * contextsByMiddleWord.size());
    System.out.println("Training size " + trainingSize);
    HashMap<Word, Double> freqCountry = new HashMap<Word, Double>();
    HashMap<Word, Double> freqMarket = new HashMap<Word, Double>();
    HashMap<Word, Double> prbsCountry = new HashMap<Word, Double>();
    HashMap<Word, Double> prbsMarket = new HashMap<Word, Double>();
    HashMap<Word, Double> frequencies = new HashMap<Word, Double>();
    double countWordsInMarketContext = 0.0;
    double countWordsInCountryContext = 0.0;

    System.out.println("The number of words in all contexts: " + sumOfWordsInAllContext);
    System.out.println("The program is estimating probabilities. This might take a while...");
    // Estimate the smoothed per-sense word probabilities (Witten-Bell style).
    estimateProbabilities(trainingSize, contextsByMiddleWord, freqCountry,
                          freqMarket, frequencies, prbsCountry, prbsMarket,
                          sumOfWordsInAllContext, countWordsInMarketContext, countWordsInCountryContext);

    System.out.println("The program is calculating accuracy. This might take a while...");
    // Test accuracy on the held-out part using the Naive Bayes method.
    test(trainingSize, contextsByMiddleWord, prbsCountry, prbsMarket);
  }

  /**
   * Counts the non-null (content word) entries of one context window.
   * Uses an iterator so the LinkedList is traversed in O(n), not O(n^2).
   */
  private static int countContentWords(LinkedList<Word> context)
  {
    int count = 0;
    for (Word w : context)
      if (w != null)
        count++;
    return count;
  }

  /**
   * Adds one to {@code key}'s count in {@code map}, inserting 1.0 if absent.
   */
  private static void increment(HashMap<Word, Double> map, Word key)
  {
    Double current = map.get(key);
    map.put(key, current == null ? 1.0 : current + 1.0);
  }

  /**
   * Accumulates per-sense and combined word frequencies over the training
   * contexts, then fills {@code prbsCountry}/{@code prbsMarket} with smoothed
   * probabilities for every word seen in training.
   *
   * Note: {@code countWordsInMarketContext}/{@code countWordsInCountryContext}
   * are primitives passed by value; they are accumulated and consumed locally,
   * and the caller's variables remain 0 (they are unused afterwards).
   *
   * Fix vs. original: {@code frequencies} (the combined count) is now always
   * incremented by one per occurrence. The original overwrote it with the
   * country-only count in the country branch and reset it to 1.0 in both
   * else branches, clobbering counts contributed by the other sense.
   */
  private static void estimateProbabilities (int trainingSize,ArrayList<LinkedList<Word>> contextsByMiddleWord, 
                                             HashMap<Word, Double> freqCountry,
                                             HashMap<Word, Double> freqMarket, 
                                             HashMap<Word, Double> frequencies, 
                                             HashMap<Word, Double> prbsCountry,
                                             HashMap<Word, Double> prbsMarket,
                                             double sumOfWordsInAllContext, 
                                             double countWordsInMarketContext,
                                             double countWordsInCountryContext)
  {
    for (int i = 0; i < trainingSize; i++)
    {
      LinkedList<Word> context = contextsByMiddleWord.get(i);
      String pivotLemma = context.get(50).getLemma();
      boolean isCountry = pivotLemma.equals("country");
      boolean isMarket = pivotLemma.equals("market");
      for (Word key : context)
      {
        if (key == null)
          continue;
        if (isCountry)
        {
          increment(freqCountry, key);
          increment(frequencies, key);
          countWordsInCountryContext++;
        }
        else if (isMarket)
        {
          increment(freqMarket, key);
          increment(frequencies, key);
          countWordsInMarketContext++;
        }
      }
    }
    // Smoothed probability for every word observed in training.
    // NOTE(review): the market and country formulas below reproduce the
    // original exactly and are NOT symmetric (the market numerator adds
    // freqMarket.get(key) to the combined count, and the denominators of the
    // seen/unseen branches use different word totals) — confirm against the
    // intended Witten-Bell smoothing scheme.
    for (Word key : frequencies.keySet())
    {
      double combined = frequencies.get(key);
      if (freqMarket.containsKey(key))
      {
        double prb = (freqMarket.get(key)
                      + freqMarket.size() * ((combined + freqMarket.get(key)) / sumOfWordsInAllContext))
                     / (countWordsInMarketContext + freqMarket.size());
        prbsMarket.put(key, prb);
      }
      else
      {
        double prb = (freqMarket.size() * (combined / sumOfWordsInAllContext))
                     / ((countWordsInMarketContext + countWordsInCountryContext) + freqMarket.size());
        prbsMarket.put(key, prb);
      }
      if (freqCountry.containsKey(key))
      {
        double prb = (freqCountry.get(key)
                      + freqCountry.size() * (combined / sumOfWordsInAllContext))
                     / (countWordsInCountryContext + freqCountry.size());
        prbsCountry.put(key, prb);
      }
      else
      {
        double prb = (freqCountry.size() * (combined / sumOfWordsInAllContext))
                     / ((countWordsInCountryContext + countWordsInMarketContext) + freqCountry.size());
        prbsCountry.put(key, prb);
      }
    }
  }

  /**
   * Classifies each held-out context with Naive Bayes (product of per-word
   * sense probabilities, ties go to "country") and prints the accuracy.
   *
   * NOTE(review): the product of many small probabilities can underflow to
   * 0.0 for wide contexts; summing log-probabilities would be the standard
   * remedy. Class priors P(sense) are also not applied. Both behaviors are
   * kept as in the original.
   */
  private static void test (int trainingSize,ArrayList<LinkedList<Word>> contextsByMiddleWord,
                            HashMap<Word, Double> prbsCountry, 
                            HashMap<Word, Double> prbsMarket)
  {
    double correct = 0.0;
    double predicted = 0.0;
    for (int i = trainingSize; i < contextsByMiddleWord.size(); i++)
    {
      LinkedList<Word> context = contextsByMiddleWord.get(i);
      double prbMarket = 1.0;
      double prbCountry = 1.0;
      for (Word key : context)
      {
        if (key == null)
          continue;
        if (prbsMarket.containsKey(key))
          prbMarket *= prbsMarket.get(key);
        if (prbsCountry.containsKey(key))
          prbCountry *= prbsCountry.get(key);
      }
      String result = (prbMarket > prbCountry) ? "market" : "country";
      if (context.get(50).getLemma().equals(result))
        correct++;
      predicted++;
    }
    // Guard against an empty test set (the original printed "NaN%").
    if (predicted == 0.0)
    {
      System.out.println("The accuracy is: 0.0%");
      return;
    }
    double result = (correct / predicted) * 100.0;
    System.out.println("The accuracy is: " + result + "%");
  }

}
