package my.thesis.bolts.computations;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import my.thesis.analytics.metrics.ThroughputMetric;
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

import java.util.ArrayList;
import java.util.Collections;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.commons.io.IOUtils;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;

import com.google.code.geocoder.Geocoder;

import my.thesis.utilities.Tags;

public class WordExtractor extends BaseRichBolt
{
	private static final long serialVersionUID = 1L;

	// Filesystem path (works when running from the IDE/project root) and bare
	// resource name (works when deployed as a jar — Maven copies
	// src/main/resources/* to the classpath root).
	String filePath = "src/main/resources/stopwords.txt";
	String fileName = "stopwords.txt";
	boolean ignoreHashtags;
	boolean ignoreUsernames;
	private OutputCollector collector;

	// Stop-word set populated once in prepare(); O(1) membership checks in TokenizeText.
	HashSet<String> STOPWORDS = new HashSet<String>();
	String SEPARATOR = " ";

	transient ThroughputMetric _throughputMetric;
	private static Boolean metricsOn = false;

	/**
	 * Tokenizes the incoming tweet text and emits {@code (id, words, country)}.
	 * The emitted list contains each distinct token once — term frequencies
	 * computed by {@link #TokenizeText} are discarded here (only the key set
	 * of the frequency map is used).
	 */
	public void execute(Tuple tuple)
	{
		if (metricsOn)
		{
			_throughputMetric.incrTuples();
		}

		Long id = tuple.getLongByField("id");
		String text = tuple.getStringByField("text");
		String country = tuple.getStringByField("country");

		/***************************** TOKENIZE TEXT *****************************/

		// Normalize: lower-case and collapse whitespace runs to single spaces
		// so splitting on SEPARATOR (" ") yields clean tokens.
		text = text.toLowerCase().replaceAll("\\s+", " ");

		HashMap<String, Integer> tokens = TokenizeText(text, ignoreHashtags, ignoreUsernames);

		// Distinct words only; counts are intentionally dropped.
		ArrayList<String> words = new ArrayList<String>(tokens.keySet());

		collector.emit(new Values(id, words, country));

		/***************************** TOKENIZE TEXT *****************************/
	}

	public void declareOutputFields(OutputFieldsDeclarer declarer)
	{
		declarer.declare(new Fields("id", "words", "country"));
	}

	public void prepare(Map stormConf, TopologyContext context, OutputCollector collector)
	{
		// LoadStopWords tries the classpath first and falls back to reading
		// the given path from the filesystem, so either form works.
		LoadStopWords(filePath);
		ignoreHashtags = true;
		ignoreUsernames = true;
		this.collector = collector;

		if (metricsOn)
		{
			_throughputMetric = new ThroughputMetric();
			context.registerMetric("wordcount", _throughputMetric, 30);
			_throughputMetric.setStartTime();
		}
	}

	// BOOK TWITTER DATA ANALYTICS
	/**
	 * Converts a tweet into individual words/tokens with term frequencies.
	 * Stop words, hyperlinks, HTML entities ({@code &...}) and tokens of
	 * length &lt;= 2 are dropped; hashtags and usernames are dropped when the
	 * corresponding flag is set. Splitting is performed on SEPARATOR (space).
	 *
	 * @param text            lower-cased, whitespace-normalized tweet text
	 * @param ignoreHashtags  drop tokens starting with '#'
	 * @param ignoreUsernames drop tokens starting with '@'
	 * @return map from token to its occurrence count in the text
	 */
	public HashMap<String, Integer> TokenizeText(String text, boolean ignoreHashtags, boolean ignoreUsernames)
	{
		String[] tokens = text.split(SEPARATOR);
		HashMap<String, Integer> words = new HashMap<String, Integer>();
		for (String token : tokens)
		{
			// Strip quotes and common punctuation. (The original pattern had an
			// empty alternative "||" that matched the empty string everywhere —
			// a harmless no-op, removed here.)
			token = token.replaceAll("\"|'|\\.|;|,", "");
			if (token.isEmpty() || token.length() <= 2 || STOPWORDS.contains(token)
					|| token.startsWith("&") || token.startsWith("http"))
			{
				continue;
			}
			if (ignoreHashtags && token.startsWith("#"))
			{
				continue;
			}
			if (ignoreUsernames && token.startsWith("@"))
			{
				continue;
			}
			Integer count = words.get(token);
			words.put(token, count == null ? 1 : count + 1);
		}
		return words;
	}

	// BOOK TWITTER DATA ANALYTICS
	/**
	 * Loads the comma-separated stop-word list into {@link #STOPWORDS}.
	 * Resolution order: classpath resource (works from a jar), then the
	 * filesystem (works from the IDE with a src/main/resources path).
	 * Words are lower-cased before insertion; STOPWORDS is a set, so
	 * duplicates are collapsed automatically. The file is decoded as UTF-8
	 * (previously the platform default charset was used).
	 *
	 * @param filename classpath resource name or filesystem path
	 */
	public void LoadStopWords(String filename)
	{
		if (filename.isEmpty())
		{
			return;
		}
		try
		{
			InputStream is = getClass().getClassLoader().getResourceAsStream(filename);
			if (is == null)
			{
				// Not on the classpath — fall back to the filesystem. The old
				// code NPE'd here on a null stream when packaged differently.
				is = new FileInputStream(filename);
			}
			BufferedReader bread = new BufferedReader(
					new InputStreamReader(is, Charset.forName("UTF-8")));
			try
			{
				String temp;
				while ((temp = bread.readLine()) != null)
				{
					if (temp.isEmpty())
					{
						continue;
					}
					for (String t : temp.split(","))
					{
						STOPWORDS.add(t.toLowerCase());
					}
				}
			}
			finally
			{
				// The original never closed the reader — resource leak.
				bread.close();
			}
		}
		catch (Exception e)
		{
			// The original swallowed IOExceptions silently; at least report
			// which file failed so a missing stop-word list is diagnosable.
			System.err.println("WordExtractor: failed to load stop words from '"
					+ filename + "': " + e);
		}
	}
}
