package com.barkerton.crawler.parser;

import java.util.Map;
import java.util.HashMap;
import java.util.StringTokenizer;
import java.util.concurrent.atomic.AtomicInteger;
import java.io.File;
import java.net.URL;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.barkerton.crawler.queuing.PageQueue;
import com.barkerton.crawler.Page;
import com.barkerton.crawler.util.PropertyManager;
import com.barkerton.crawler.util.Util;

/**
 * Analyzes web page content: tokenizing, counting word frequencies,
 * and writing the results to disk for search indexing.
 * 
 * @author c.barker
 *
 */

public class PageAnalyzer implements Runnable {
	
	private static final Log log = LogFactory.getLog(PageAnalyzer.class);
	
	private static final AtomicInteger CACHE_FILE_NBR = new AtomicInteger(1);
	private static final String CACHE_DIR_PROP = "cache.dir";
	private final Map<String, Integer> tokenFreq;	// word tokens and frequency counts
	private final PageQueue pQueue;					// Pages to be processed for indexing
	
	public PageAnalyzer() {
		this.pQueue = PageQueue.getInstance();
		this.tokenFreq = new HashMap<String, Integer>();
	}
	
	/**
	 * Worker loop: repeatedly dequeues a page, tokenizes and counts its
	 * words, and serializes the frequency map to disk. The loop exits
	 * when the thread is interrupted while blocked on the queue.
	 */
	public void run() {

		while (true) {
			Page page;
			try {
				page = pQueue.dequeue();
				log.debug("Dequeued page: " + page);
			} catch (InterruptedException ie) {
				// Restore the interrupt flag and stop the worker; the
				// original code swallowed the interrupt and spun forever,
				// making the thread impossible to shut down cleanly.
				Thread.currentThread().interrupt();
				log.info("PageAnalyzer interrupted; shutting down");
				return;
			}
			
			if (page == null)
				continue;

			tokenizeFreq(page);
			serializeFreq();
		}
	}
	
	/**
	 * Removes tags and tokenizes words based on white space, then
	 * increments the per-word frequency counts in the token map.
	 * Stop words are skipped (before stemming, to avoid a wasted
	 * stemming pass on words that would be discarded anyway).
	 *
	 * @param page the page whose content is to be tokenized and counted
	 */
	private void tokenizeFreq(Page page) {
		// remove tags and tokenize words based on white space
		StringTokenizer st = new StringTokenizer(Parser.eliminateTags(page.getContent()), " ");
		Stemmer s = new Stemmer();
		
		while (st.hasMoreTokens()) {
			String token = st.nextToken();
			
			// Skip stop words before paying for the stemming call.
			if (StopWords.isStopWord(token))
				continue;
			
			String word = s.stem(token);
			
			// Increment the frequency count. (Bug fix: the original stored
			// the old count back without adding one, so every word's count
			// stayed at zero.)
			Integer value = tokenFreq.get(word);
			int freq = (value == null) ? 1 : value.intValue() + 1;
			tokenFreq.put(word, Integer.valueOf(freq));
			log.debug("Word[" + word + "] : Count[" + freq + "]");
		}
	}
	
	/**
	 * Serializes the token frequency map to a numbered cache file on
	 * disk for later processing.
	 *
	 * NOTE(review): tokenFreq is never cleared between pages, so each
	 * cache file holds cumulative counts across all pages seen by this
	 * analyzer — confirm that is intended.
	 */
	private void serializeFreq() {
		String cache_dir = PropertyManager.getInstance().getValue(CACHE_DIR_PROP, "cache");
		
		// mkdirs (not mkdir) so a nested configured path is also created.
		File fCache = new File(cache_dir);
		if (!fCache.isDirectory() && !fCache.mkdirs()) {
			log.error("Unable to create serialization directory for cache: " + cache_dir);
			return;
		}
		
		// CACHE_FILE_NBR is shared across analyzer threads, so each file
		// gets a unique sequence number.
		String fileName = cache_dir + File.separator + "cache_" + Integer.toString(CACHE_FILE_NBR.getAndIncrement()) + ".bin";
		Util.serialize(new File(fileName), tokenFreq);
	}
	
	/*
	 * Simple sanity "smoke" test
	 */
	public static void main(String[] args) throws Exception {
		URL url = new URL("http://www.utexas.edu");
		PageAnalyzer pa = new PageAnalyzer();
		Page p = new Page(url);
		p.setContent("I like\n to eat eggs and to taste fruit in the morning.  Yes, I do eggs.");
		pa.tokenizeFreq(p);
		pa.serializeFreq();
	}
}
