package webcrawler;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Array;
import java.net.HttpCookie;
import java.net.URL;
import java.net.URLConnection;
import java.rmi.RemoteException;
import java.util.ArrayList;
import java.util.StringTokenizer;

import org.tempuri.ICrawlerWSProxy;
import org.xml.sax.InputSource;

import datacontracts.CrawledPage;

/**
 * Crawls a single root URL: validates it, downloads its HTML, extracts
 * anchor hrefs, and records the root, internal sublinks, and external
 * links through the {@link ICrawlerWSProxy} web-service proxy.
 */
public class WebCrawler {

	public static void main(String[] args) throws RemoteException {
		CrawledPage page = new CrawledPage("http://www.java.com/");
		crawlpage(page);
	}

	/**
	 * Crawls {@code page}'s root URL end-to-end: records the root in the
	 * database, downloads the page, parses its links, and prints a summary.
	 *
	 * @param page the page whose root URL is crawled; its validity flags and
	 *             link collections are populated as a side effect
	 * @throws RemoteException if a web-service call fails
	 */
	private static void crawlpage(CrawledPage page) throws RemoteException {
		ICrawlerWSProxy iCrawler = new ICrawlerWSProxy();

		System.out.println("Starting to crawl " + page.getRootUrl());

		URL crawlUrl = null;
		try {
			crawlUrl = new URL(page.getRootUrl());
			page.setRootValidity(true);
		} catch (Exception ex) {
			page.setRootValidity(false);
		}

		// Record the root URL (valid or not) in the database.
		iCrawler.createorUpdateRootUrl(page.getRootUrl(), page.isRootValid());
		// An invalid root cannot be crawled.
		if (!page.isRootValid())
			return;

		String content = downloadHtml(crawlUrl, page.getRootUrl());
		// BUG FIX: the original fell through with a null content on download
		// failure and crashed with an NPE at content.toLowerCase().
		if (content == null)
			return;

		parseLinks(page, content, iCrawler);
		printResults(page);
	}

	/**
	 * Downloads the body of {@code crawlUrl} and returns it as a String.
	 *
	 * @return the page body, or {@code null} when the connection fails, the
	 *         content type is not text/html, or the body is empty
	 */
	private static String downloadHtml(URL crawlUrl, String rootUrl) {
		InputStream urlStream = null;
		String type;
		try {
			URLConnection connection = crawlUrl.openConnection();
			// getContentLength() == 0 means the server reported an empty
			// body (-1 means "unknown length", which we still try to read).
			if (connection.getContentLength() == 0)
				throw new Exception("Content length was 0");

			urlStream = crawlUrl.openStream();
			type = connection.getContentType();
			System.out.println("Content Type: " + type);
		} catch (Exception ex) {
			System.out.println(String.format(
					"Unable to open the connection to %s", rootUrl));
			closeQuietly(urlStream);
			return null;
		}

		// Only HTML is worth scanning for anchor tags.
		if (type == null || !type.contains("text/html")) {
			System.out.println("Probably should stop here, not html?");
			closeQuietly(urlStream);
			return null;
		}

		// Buffer all bytes first and decode once. The original decoded each
		// 1000-byte chunk separately, which corrupts multi-byte characters
		// straddling a chunk boundary, and built the result with repeated
		// String concatenation (O(n^2)).
		ByteArrayOutputStream buffer = new ByteArrayOutputStream();
		byte[] chunk = new byte[1000];
		try {
			int read;
			while ((read = urlStream.read(chunk)) != -1) {
				buffer.write(chunk, 0, read);
			}
		} catch (Exception ex) {
			System.out.println("Error searching for links: " + ex.getMessage());
			return null;
		} finally {
			closeQuietly(urlStream);
		}
		return buffer.size() == 0 ? null : buffer.toString();
	}

	/**
	 * Scans {@code content} for anchor hrefs and records each as either an
	 * internal sublink or an external link on {@code page} and in the
	 * database.
	 */
	private static void parseLinks(CrawledPage page, String content,
			ICrawlerWSProxy iCrawler) throws RemoteException {
		String lowerCaseContent = content.toLowerCase();
		int index = 0;
		while ((index = lowerCaseContent.indexOf("<a", index)) != -1) {
			// No href/= after the last "<a" means no more links.
			if ((index = lowerCaseContent.indexOf("href", index)) == -1
					|| (index = lowerCaseContent.indexOf("=", index)) == -1) {
				break;
			}
			index++;

			// The link text runs until whitespace, a closing quote, '>' or '#'.
			String remainingContent = content.substring(index);
			StringTokenizer st = new StringTokenizer(remainingContent,
					"\t\n\r\">#");
			if (!st.hasMoreTokens())
				break;
			String strLink = st.nextToken();

			CrawledPage.SubLink sublink = page.new SubLink(strLink);
			URL urlLink;
			try {
				urlLink = new URL(page.getRootUrl() + strLink);
				sublink.setLinkValidity(true);
			} catch (Exception ex) {
				System.out.println("Not a valid link " + strLink);
				sublink.setLinkValidity(false);
				page.addSublink(sublink);
				// BUG FIX: the original fell through here with urlLink == null,
				// hit an NPE in the next try-block, and added the same sublink
				// a second time in the catch handler.
				continue;
			}

			try {
				// Links without "http" are assumed to be relative to the
				// current page; the rest are most likely external pages.
				if (!strLink.contains("http")) {
					URLConnection linkConnection = urlLink.openConnection();
					String linkType = linkConnection.getContentType();

					// BUG FIX: the original tested the root page's content
					// type here (always text/html at this point), not the
					// link's own type — the check was dead code. Skip
					// non-HTML targets instead of aborting the whole scan.
					if (linkType == null || !linkType.contains("text/html")) {
						System.out
								.println("Probably should stop here, not html?");
						continue;
					}

					page.addSublink(sublink);
					System.out.println(sublink.getSublink() + " "
							+ page.getRootUrl() + " " + sublink.isLinkValid());
					iCrawler.createorUpdateSublink(sublink.getSublink(),
							page.getRootUrl(), sublink.isLinkValid());
				} else {
					CrawledPage.ExternalLink extLink = page.new ExternalLink(
							strLink);
					try {
						new URL(extLink.getExternalLink());
						extLink.setLinkValidity(true);
					} catch (Exception ex) {
						extLink.setLinkValidity(false);
					}
					page.addExtLink(extLink);
					iCrawler.createorUpdateExternalLink(
							extLink.getExternalLink(), extLink.isLinkValid(),
							page.getRootUrl());
				}
			} catch (Exception ex) {
				// Connection to the link failed; still record the sublink.
				page.addSublink(sublink);
				System.out.println(ex.getMessage());
			}
		}
	}

	/** Prints the collected sublinks and external links to stdout. */
	private static void printResults(CrawledPage page) {
		System.out.println("Done!");
		System.out.println();
		System.out.println("Sublinks");
		for (CrawledPage.SubLink sl : page.getSublinks()) {
			System.out.println(sl.getSublink());
		}

		System.out.println();
		for (CrawledPage.ExternalLink el : page.getExtLinks()) {
			System.out.println(el.getExternalLink());
		}
	}

	/** Closes a stream, ignoring close failures (best effort). */
	private static void closeQuietly(InputStream in) {
		if (in == null)
			return;
		try {
			in.close();
		} catch (IOException ignored) {
			// nothing sensible to do on close failure
		}
	}

}
