/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package pl.tom.social.fetcher.crawler;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import org.apache.commons.httpclient.DefaultHttpMethodRetryHandler;
import org.apache.commons.httpclient.Header;
import org.apache.commons.httpclient.HeaderElement;
import org.apache.commons.httpclient.HttpMethodBase;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.httpclient.URI;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.params.HttpMethodParams;
import org.apache.commons.httpclient.util.DateUtil;
import com.torunski.crawler.filter.ILinkFilter;
import com.torunski.crawler.link.Link;
import com.torunski.crawler.parser.IParser;
import com.torunski.crawler.parser.PageData;
import com.torunski.crawler.parser.httpclient.AbstractHttpClient;
import com.torunski.crawler.parser.httpclient.HttpClientUtil;
import com.torunski.crawler.parser.httpclient.PageDataHttpClient;
import com.torunski.crawler.util.ILinkExtractor;
import com.torunski.crawler.util.LinksUtil;
import org.apache.log4j.Logger;

/**
 *
 * @author Tom
 */
public class SocialHttpClientParser extends AbstractHttpClient implements IParser {

	// NOTE: 'transient' removed — it only affects instance serialization and is
	// meaningless on a static field.
	private static final Logger LOG = Logger.getLogger(SocialHttpClientParser.class);
	/** user agent HTTP header of the crawler. */
	public static final String USER_AGENT = "Mozilla/5.0 (Windows; U; Windows NT 5.1; ko; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)";
	/** set the default link extractor of LinksUtil. */
	private ILinkExtractor linkExtractor = LinksUtil.DEFAULT_LINK_EXTRACTOR;

	/**
	 * The constructor of SocialHttpClientParser for single HTTP connections.
	 */
	public SocialHttpClientParser() {
		this(false);
	}

	/**
	 * Creates an instance of SocialHttpClientParser.
	 *
	 * @param multiThreaded
	 *            true for creating a multi threaded connection manager else
	 *            only a single connection is allowed
	 */
	public SocialHttpClientParser(boolean multiThreaded) {
		super(multiThreaded);
	}

	/**
	 * Loads the data of the URI. A crawler can load different URIs at the same
	 * time and parse them lately. Hence all necessary information have to be
	 * stored in a PageData object. E.g. different threads can download the
	 * content of the URI parallel and parse them in a different order.
	 *
	 * <p>Redirects are NOT followed automatically; instead a PageData with
	 * status {@link PageData#REDIRECT} is returned whose data is the target
	 * URI, so the crawler can decide whether to follow it. A 304 response
	 * (driven by the link's stored timestamp via If-Modified-Since) yields
	 * {@link PageData#NOT_MODIFIED}; any failure yields {@link PageData#ERROR}.
	 *
	 * @param link the link of the page
	 * @return the page data of the uri with a status code
	 *
	 * @see com.torunski.crawler.parser.IParser#load(com.torunski.crawler.link.Link)
	 */
	public PageData load(Link link) {

		String uri = link.getURI();
		LOG.info("download: " + uri);

		// Create a method instance.
		GetMethod httpGet = null;
		try {
			httpGet = new GetMethod(uri);
			// redirects are reported to the caller, not followed here
			httpGet.setFollowRedirects(false);
		} catch(Exception e) {
			// FIX: log the exception too — previously the cause of the failure
			// (e.g. a malformed URI message) was silently dropped.
			LOG.info("HTTP get failed for " + uri, e);
			return new PageDataHttpClient(link, PageData.ERROR);
		}

		// Provide a custom retry handler
		httpGet.getParams().setParameter(HttpMethodParams.RETRY_HANDLER, new DefaultHttpMethodRetryHandler(5, false));
		// httpGet.getParams().setIntParameter(HttpMethodParams.BUFFER_WARN_TRIGGER_LIMIT, 1024*1024);
		httpGet.setRequestHeader("User-Agent", USER_AGENT);

		// ask the server for a 304 if the page is unchanged since the last crawl
		if(link.getTimestamp() > 0) {
			httpGet.setRequestHeader(HttpClientUtil.HEADER_IF_MODIFIED_SINCE, DateUtil.formatDate(new Date(link.getTimestamp())));
		}

		int statusCode = 0;
		String responseBody = null;
		try {
			// Execute the method
			statusCode = client.executeMethod(httpGet);

			if(statusCode == HttpStatus.SC_NOT_MODIFIED) {
				LOG.info("Content not modified since last request of " + uri);
			} else if(HttpClientUtil.isRedirect(statusCode)) {
				// resolve the Location header against the request URI
				final URI redirect = HttpClientUtil.getRedirectURI(new URI(link.getURI(), false), httpGet);
				if(redirect != null) {
					// the redirect target is transported in the body field
					responseBody = redirect.getURI();
					LOG.info("Redirect found for " + uri + " to " + redirect);
				} else {
					statusCode = HttpStatus.SC_INTERNAL_SERVER_ERROR;
					LOG.warn("Invalid redirect for " + uri);
				}
			} else if(statusCode != HttpStatus.SC_OK) {
				LOG.info("Method failed: " + httpGet.getStatusLine() + " for " + uri);
			} else if(!containsText(httpGet)) {
				LOG.warn("URL does not contain text or content-type is wrong of " + uri);
				// abort so the (possibly large, binary) body is not downloaded
				httpGet.abort();
			} else {
				// read the response body as a stream
				responseBody = httpGet.getResponseBodyAsString();
				// don't overwrite the values of the given link object
				link = new Link(httpGet.getURI().getURI());
				link.setTimestamp(HttpClientUtil.getLastModified(httpGet));
			}
		} catch(IOException e) {
			responseBody = null;
			LOG.warn("Failed reading from uri=" + uri, e);
		} finally {
			// Release the connection.
			httpGet.releaseConnection();
		}

		// FIX: require a non-null redirect target. Previously an IOException
		// thrown after a redirect status was recorded could produce a REDIRECT
		// PageData with null data, which parse() would emit as a null link.
		if(HttpClientUtil.isRedirect(statusCode) && responseBody != null) {
			PageDataHttpClient pageDataHttpClient = new PageDataHttpClient(link, PageData.REDIRECT);
			pageDataHttpClient.setData(responseBody);
			return pageDataHttpClient;
		} else if(HttpClientUtil.isRedirect(statusCode)) {
			return new PageDataHttpClient(link, PageData.ERROR);
		} else if(responseBody != null) {
			// headers (and thus the charset) stay readable after releaseConnection()
			return new PageDataHttpClient(link, responseBody, httpGet.getResponseCharSet());
		} else {
			if(statusCode == HttpStatus.SC_NOT_MODIFIED) {
				return new PageDataHttpClient(link, PageData.NOT_MODIFIED);
			} else {
				return new PageDataHttpClient(link, PageData.ERROR);
			}
		}
	}

	/**
	 * Returns true if the response looks like text and is worth downloading.
	 * A missing content-type header is treated optimistically as text.
	 *
	 * @param method the executed HTTP method whose response headers are inspected
	 * @return true if the content-type starts with "text" or is absent
	 */
	private boolean containsText(HttpMethodBase method) {
		Header contentType = method.getResponseHeader("content-type");
		if(contentType != null) {
			HeaderElement[] elements = contentType.getElements();
			for(int i = 0; i < elements.length; i++) {
				String name = elements[i].getName();
				if((name != null) && (name.startsWith("text"))) {
					return true;
				}
			}
			// if no correct content-type is found, so it isn't text
			return false;
		}
		// if no content type is set, it may be text
		return true;
	}

	/**
	 * {@inheritDoc}
	 * @see com.torunski.crawler.parser.IParser#parse(com.torunski.crawler.parser.PageData,
	 *      com.torunski.crawler.filter.ILinkFilter)
	 */
	public Collection parse(PageData pageData, ILinkFilter linkFilter) {
		if(!(pageData instanceof PageDataHttpClient)) {
			LOG.warn("Type mismatch in " + this.getClass().getName());
			return Collections.EMPTY_LIST;
		}
		if(pageData.getStatus() == PageData.REDIRECT) {
			// for a redirect, the data field holds the single target URI
			Collection<String> links = new HashSet<String>();
			if((linkFilter == null) || (linkFilter.accept(pageData.getLink().getURI(), (String) pageData.getData()))) {
				links.add((String) pageData.getData());
			}
			return links;
		}
		return linkExtractor.retrieveLinks(pageData.getLink().getURI(), (String) pageData.getData(), linkFilter);
	}

	/**
	 * @return the used link extractor.
	 * @since 1.1
	 */
	public ILinkExtractor getLinkExtractor() {
		return linkExtractor;
	}

	/**
	 * Sets link extractor used to extract links from the retrieved content.
	 *
	 * @see com.torunski.crawler.util.ILinkExtractor
	 * @param linkExtractor
	 *            the link extractor used to extract links from the content.
	 * @since 1.1
	 * @throws IllegalArgumentException if linkExtractor is null
	 */
	public void setLinkExtractor(ILinkExtractor linkExtractor) {
		if(linkExtractor == null) {
			throw new IllegalArgumentException("Parameter linkExtractor is null.");
		}
		this.linkExtractor = linkExtractor;
	}
}
