package com.iiitb.wtp;

import java.io.IOException;
import java.net.URL;
import java.net.URLConnection;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.http.client.ClientProtocolException;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;

import com.iiitb.model.SeedHandler;

/**
 * Extracts hyperlinks from fetched page content, feeds them into the shared
 * {@link WebGraph}, and drives (de)serialization of that graph plus the
 * PageRank-style probability simulation.
 *
 * <p>NOTE(review): {@code wg} is a static mutable field shared by all Parser
 * instances; this class is not thread-safe — confirm the crawler uses it from
 * a single thread.
 */
public class Parser {

	String pageSource = null;
	SeedHandler db = null;
	// Shared in-memory link graph accumulated across every parsed page.
	static WebGraph wg = new WebGraph();
	long serialized_id;
	int count = 0;

	// File extensions that must never enter the crawl frontier (non-HTML
	// resources). All entries carry the leading dot so "hotdoc" is no longer
	// wrongly excluded the way the old bare "doc" suffix check excluded it.
	private static final String[] EXCLUDED_EXTENSIONS = {
			".css", ".js", ".pdf", ".ppt", ".pptx", ".doc", ".docx",
			".ps", ".xls", ".xlsx", ".txt", ".rar", ".zip", ".exe" };

	// Captures absolute http:// and https:// targets of href attributes.
	// (The old split("href=\"http://") silently dropped every https link.)
	private static final Pattern HREF_PATTERN =
			Pattern.compile("href=\"(https?://[^\"]+)\"");

	/**
	 * Parses one fetched page: extracts its outgoing hyperlinks and records
	 * them in the crawl frontier and the web graph.
	 *
	 * @param contentOfPage raw HTML of the fetched page
	 * @param url           the page's own URL (source node in the web graph)
	 * @param tag           the search tag to associate with every found link
	 * @return map of discovered link URL -> tag
	 * @throws SQLException if the SeedHandler cannot be created
	 * @throws IOException  declared for interface compatibility with callers
	 */
	public Map<String, String> parse(String contentOfPage, String url,
			String tag) throws SQLException, IOException {
		return hyperlinksExtractor(url, contentOfPage, tag);
	}

	/**
	 * Scans {@code contentOfPage} for absolute http(s) hrefs, skips non-HTML
	 * resources, and adds each surviving link to the result map and the
	 * shared web graph with unit edge weight.
	 *
	 * <p>Extraction is best-effort per link: a bad link is logged and skipped
	 * instead of aborting the rest of the page (the old loop-wide catch made
	 * one malformed URL discard every remaining link).
	 */
	private Map<String, String> hyperlinksExtractor(String url,
			String contentOfPage, String tag) throws SQLException, IOException {
		db = new SeedHandler();
		Map<String, String> list = new HashMap<>();
		System.out.println("extracting hyperlinks for " + url);

		Matcher matcher = HREF_PATTERN.matcher(contentOfPage);
		while (matcher.find()) {
			String link = matcher.group(1);
			if (hasExcludedExtension(link)) {
				continue;
			}
			try {
				// Syntax check only — the old openConnection() call never
				// actually connected, so it validated nothing extra and
				// leaked an unused URLConnection.
				new URL(link);
				list.put(link, tag);
				wg.addLink(url, link, 1.0);
			} catch (Exception e) {
				// Best-effort: log and continue with the next link.
				e.printStackTrace();
			}
		}
		return list;
	}

	/** Returns true when {@code link} ends with a blocked file extension (case-insensitive). */
	private static boolean hasExcludedExtension(String link) {
		String lower = link.toLowerCase(Locale.ROOT);
		for (String ext : EXCLUDED_EXTENSIONS) {
			if (lower.endsWith(ext)) {
				return true;
			}
		}
		return false;
	}

	/** Serializes the shared web graph to file via {@link SeedHandler}. */
	public void serialiseWG() throws SQLException {
		db = new SeedHandler();
		db.serializeJavaObjectToFile(wg);
	}

	/** Restores the shared web graph from file and reports its node count. */
	public void deserialiseWG() throws SQLException, ClassNotFoundException,
			IOException {
		db = new SeedHandler();
		wg = (WebGraph) db.deSerializeJavaObjectFromFile();
		System.out.println(wg.numNodes() + "Number of nodes");
	}

	/**
	 * Assigns default transition probabilities to the in-memory graph,
	 * persists it, then runs the Markov-chain simulation over it.
	 */
	public void calcProb() throws SQLException, ClientProtocolException,
			IOException {
		Probability prob = new Probability();
		prob.updateDefaultProb(wg);
		db = new SeedHandler();
		db.serializeJavaObjectToFile(wg);
		MarkovSim ms = new MarkovSim();
		ms.runSimulation(wg);
	}

	/**
	 * Runs the Markov-chain simulation against a graph loaded from disk.
	 * The local {@code wg} deliberately shadows the static field so the
	 * shared in-memory graph is left untouched.
	 */
	public void calcProbDeserialized() throws SQLException,
			ClientProtocolException, IOException {
		db = new SeedHandler();
		WebGraph wg;
		wg = (WebGraph) db.deSerializeJavaObjectFromFile();

		MarkovSim ms = new MarkovSim();
		ms.runSimulation(wg);
	}

}
