package com.gash.scrape;

import java.io.BufferedInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.List;
import java.util.Locale;
import java.util.Vector;

import com.gash.scrape.data.Link;
import com.gash.scrape.data.Link.LinkType;
import com.gash.scrape.data.Page;

import extractor.ImageExtractor;
import extractor.ImageExtractor.HtmlImage;
import extractor.LinkExtractor;
import extractor.LinkExtractor.HtmlLink;
import extractor.TextExtractor;
import extractor.TitleExtractor;

/**
 * Demonstration of scraping web pages for links and key words. Set up to
 * work with Wikipedia.
 * 
 * Note that while there are off-the-shelf scrapers available, they are
 * XML-configured; for this demonstration, direct coding is acceptable.
 */
public class Scraper {
	StopWordsFile swords; // stop-word filter applied to extracted keywords

	/**
	 * Creates a scraper whose keyword extraction filters out stop words.
	 *
	 * @param stopwordsF file containing the stop words (format per {@link StopWordsFile})
	 * @throws Exception if the stop-words file cannot be loaded
	 */
	public Scraper(File stopwordsF) throws Exception {
		swords = new StopWordsFile(stopwordsF);
	}

	/**
	 * Downloads the page at {@code url} and extracts its title, keywords and
	 * links. Network failures are treated as best-effort: the returned Page
	 * always carries the URL plus whatever was extracted before the failure.
	 *
	 * @param url the page to fetch
	 * @return a Page populated with title, keywords and links (possibly empty)
	 * @throws Exception propagated from the downstream extractors
	 */
	public Page scrape(URL url) throws Exception {
		Page page = new Page();
		page.setUrl(url.toString());

		InputStream io = null;
		try {
			io = new BufferedInputStream(url.openConnection().getInputStream());

			// Accumulate raw bytes and decode once at the end. The previous
			// code decoded each 2 KB chunk separately, which corrupts any
			// multi-byte character straddling a chunk boundary.
			ByteArrayOutputStream bytes = new ByteArrayOutputStream();
			byte[] raw = new byte[2048];
			int r;
			while ((r = io.read(raw, 0, raw.length)) > 0) {
				bytes.write(raw, 0, r);
			}

			// NOTE(review): assumes UTF-8 content (the original used the
			// platform default charset) — confirm against target sites.
			// Locale.ROOT keeps lowercasing independent of the host locale.
			String wholepage = bytes.toString("UTF-8").toLowerCase(Locale.ROOT);

			// basic cleanup before processing: drop simple formatting tags.
			// replace() instead of replaceAll() — the targets are literal
			// strings, not regular expressions.
			wholepage = wholepage.replace("<i>", "").replace("</i>", "")
					.replace("<tt>", "").replace("</tt>", "")
					.replace("<b>", "").replace("</b>", "");

			// TODO better cleanup and removal of javascript

			extractGeneral(page, wholepage);
			extractLinks(page, wholepage);

		} catch (FileNotFoundException ignored) {
			// HTTP 404s surface as FileNotFoundException from URLConnection;
			// deliberately return the (mostly empty) page instead of failing.
		} catch (IOException ioe) {
			// best-effort crawl: report and return what was parsed so far,
			// rather than silently swallowing the failure as before
			System.err.println("scrape failed for " + url + ": " + ioe);
		} finally {
			if (io != null) {
				try {
					io.close();
				} catch (IOException closeEx) {
					closeEx.printStackTrace();
				}
			}
		}

		return page;
	}

	/**
	 * Convenience overload: scrapes {@code webpage} given as a URL string.
	 *
	 * @param webpage the page address as a string
	 * @return the scraped Page
	 * @throws Exception if the string is not a valid URL, or from scraping
	 */
	public Page scrape(String webpage) throws Exception {
		return scrape(new URL(webpage));
	}

	/**
	 * Extracts the page title and the non-stop-word keywords from the raw
	 * page text into {@code page}.
	 *
	 * @param page      destination for title and keywords
	 * @param wholepage full (lower-cased) page markup
	 */
	public void extractGeneral(Page page, String wholepage) {
		TitleExtractor te = new TitleExtractor();
		page.setTitle(te.grabTitle(wholepage));

		TextExtractor txte = new TextExtractor();
		List<String> text = txte.grabText(wholepage);
		if (text == null)
			return;

		for (String t : text) {
			// String.split never returns null, so no null check is needed
			for (String word : t.split("\\s+")) {
				// skip anything that still looks like markup, and stop words
				if (word.indexOf('<') == -1 && !swords.contains(word))
					page.addKeyword(word);
			}
		}
	}

	/**
	 * Extracts anchors from the page, classifies each as an image or text
	 * link, resolves wikipedia-relative ("/wiki/...") and fragment ("#...")
	 * URLs to absolute ones, and adds the links to {@code page}.
	 *
	 * @param page      destination for the extracted links
	 * @param wholepage full (lower-cased) page markup
	 */
	private void extractLinks(Page page, String wholepage) {
		LinkExtractor le = new LinkExtractor();
		le.setWikipedia(page.getUrl().toLowerCase(Locale.ROOT).indexOf("wikipedia") != -1);

		// base for resolving wikipedia's site-relative "/wiki/..." hrefs
		String baseUrl = "";
		if (le.isWikipedia()) {
			int idx = page.getUrl().indexOf("/wiki");
			if (idx != -1)
				baseUrl = page.getUrl().substring(0, idx);
		}

		Vector<HtmlLink> links = le.grabLinks(wholepage);
		for (HtmlLink h : links) {
			Link link = new Link();
			link.setCitation(h.citation);

			if (h.anchor.indexOf("<img") != -1) {
				// image anchor: keywords come from the img alt text
				link.setType(LinkType.Image);
				ImageExtractor ie = new ImageExtractor();
				Vector<HtmlImage> images = ie.grabImages(h.anchor);
				for (HtmlImage img : images) {
					link.setUrl(img.image);
					if (img.alt != null) {
						for (String k : img.alt.split("\\s"))
							link.addKeyword(k);
					}
				}
			} else {
				// text anchor: keywords come from the anchor text itself
				link.setType(LinkType.Text);
				if (h.anchor != null) {
					for (String k : h.anchor.split("\\s"))
						link.addKeyword(k);
				}
			}

			// NOTE(review): this overwrites any image URL set above with the
			// anchor's href (original behavior, preserved) — confirm intent.
			link.setUrl(h.link);

			String u = link.getUrl();
			if (u != null) {
				if (le.isWikipedia() && u.startsWith("/wiki")) {
					// site-relative wikipedia link -> absolute
					link.setUrl(baseUrl + u);
				} else if (u.startsWith("#")) {
					// fragment link -> anchor within this page
					link.setUrl(page.getUrl() + u);
				}
			}

			page.addLink(link);
		}
	}
}
