/*
 * @(#)LinkExtractor.java	1.0 05/01/07
 *
 * Copyright 2007 Fabio Gasparetti. All rights reserved.
 */
package org.tabularium.text.html;

import java.io.*;
import java.util.*;

import org.tabularium.text.analysis.*;

/**
 * Helps extract links from Web pages.
 *
 * @author  Fabio Gasparetti
 * @version 1.0, 05/01/07
 */

public abstract class LinkExtractor {
	/* the following tags do not break sentences on their own;
	 * if there are spaces along with them, they will be included 
	 * in the output during parsing, breaking the flow.
	 * Note: maybe SPAN and DIV should be removed?
	 */
	public static final String[] DEFAULT_HTML_NOT_FLOWBREAK = { "STYLE", "SPAN",
			"LINK", "DIV", "H1", "H2", "H3", "H4", "H5", "H6", "STRONG", "B",
			"I", "U", "EM", "BIG", "SMALL", "TT", "SUP", "SUB", "A"/*, "P" */ };
	
	/** Tags considered non-flow-breaking; defaults to DEFAULT_HTML_NOT_FLOWBREAK. */
	protected String[] htmlNotFlowBreak = DEFAULT_HTML_NOT_FLOWBREAK;
		
	/**
	 * For a given page this method extracts the list of the links and the
	 * associated text, that is, the anchors and the surrounding keywords.
	 * Repetitions of the same url occurring in more than one link are allowed
	 * in the returned list.
	 * <p>
	 * The <code>winSize</code> determines how many words to consider on the
	 * left and on the right of the link, e.g., if <code>winSize = 5</code>
	 * the method extracts up to 5 words on the left and 5 on the right of
	 * a link.
	 * <p>
	 * Each element of the list is an array of five <code>String</code> objects:
	 * the 1st and the 2nd are the tokens before and after the link,
	 * the 3rd is the anchor, the 4th is the title and the last is the url.
	 * <p>
	 * Some implementations might require content freed of numeric character
	 * references and character entity
	 *
	 * @see com.parc.search.commons.HtmlUtils#removeEscapedCh()
	 */
	public abstract List extractLinkWindows(String url, String htmltext,
			int winSize, Analyzer lexicalAnalyzer) throws Exception;

	/**
	 * Returns an ordered list of urls (w/repetitions) corresponding to
	 * the links in the given Web page.
	 * <p>
	 * Note: the Web page must be escaped-character free, the returned urls are
	 * absolute and not normalized.
	 *
	 * @see com.parc.search.commons.HtmlUtils#removeEscapedCh()
	 */
	public abstract List extractLinks(String url, String htmltext)
			throws Exception;

	/**
	 * Builds an element for the extractLinkWindows() method: a five-element
	 * array holding [beforeWindow, afterWindow, anchorTokens, titleTokens, url].
	 * Windows are clipped at the nearest sentence break ('.') and limited to
	 * at most <code>size</code> tokens each; anchor and title are tokenized
	 * without any limit.
	 *
	 * @param before    raw text preceding the link
	 * @param after     raw text following the link
	 * @param linkText  the anchor text of the link
	 * @param titleText the title attribute text of the link
	 * @param url       the link target (stored verbatim)
	 * @param size      maximum number of tokens kept on each side of the link
	 * @param analyzer  lexical analyzer used to tokenize the text fragments
	 * @throws IOException if the analyzer fails while reading a text fragment
	 */
	protected static String[] buildLinkWindow(String before, String after,
			String linkText, String titleText, String url, int size, Analyzer analyzer) throws IOException {
		String[] linkData = new String[5];
		// before-window: only the text after the last sentence break,
		// keeping at most the LAST `size` tokens (closest to the link)
		String bef = before;
		int i = before.lastIndexOf('.');
		if (i != -1) {
			bef = before.substring(i);
		}
		linkData[0] = tokenizeAndJoin(analyzer, bef, size, true);
		// after-window: only the text before the first sentence break,
		// keeping at most the FIRST `size` tokens (closest to the link)
		String aft = after;
		i = after.indexOf('.');
		if (i != -1) {
			aft = after.substring(0, i);
		}
		linkData[1] = tokenizeAndJoin(analyzer, aft, size, false);
		// anchor and title: tokenized with no limit
		linkData[2] = tokenizeAndJoin(analyzer, linkText, 0, false);
		linkData[3] = tokenizeAndJoin(analyzer, titleText, 0, false);
		// url is stored verbatim
		linkData[4] = url;
		return linkData;
	}

	/**
	 * Tokenizes <code>text</code> with the given analyzer and joins the tokens
	 * with single spaces (no trailing space).
	 * <p>
	 * If <code>limit &gt; 0</code> at most <code>limit</code> tokens are kept:
	 * the last ones when <code>keepTail</code> is true, the first ones
	 * otherwise. A non-positive limit keeps every token.
	 *
	 * @throws IOException if the analyzer fails while reading the text
	 */
	private static String tokenizeAndJoin(Analyzer analyzer, String text,
			int limit, boolean keepTail) throws IOException {
		LinkedList list = new LinkedList();
		TokenStream tokenStream = analyzer.tokenStream(new StringReader(text));
		String token = tokenStream.next();
		while (token != null) {
			if (limit > 0 && !keepTail && list.size() >= limit) {
				// head mode: stop before exceeding the limit
				break;
			}
			list.addLast(token);
			if (limit > 0 && keepTail && list.size() > limit) {
				// tail mode: slide the window by dropping the oldest token
				list.removeFirst();
			}
			token = tokenStream.next();
		}
		// join tokens with single spaces, without a trailing separator
		StringBuffer s = new StringBuffer();
		Iterator iter = list.iterator();
		while (iter.hasNext()) {
			s.append((String) iter.next());
			if (iter.hasNext()) {
				s.append(' ');
			}
		}
		return s.toString();
	}
}