import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;


public class Crawler {

	/** Links that have already been visited. */
	protected Set<String> seen_links;
	/** Absolute URLs discovered on the first crawled page, queued for later visits. */
	protected Set<String> need_to_see;
	/** Once true, newly discovered links are no longer added to {@link #need_to_see}. */
	protected boolean link_once = false;

	/** Creates a crawler with empty seen/pending link sets. */
	public Crawler() {
		seen_links = new HashSet<String>();
		need_to_see = new HashSet<String>();
	}

	/**
	 * Fetches the page at the given URL and returns its body text.
	 *
	 * <p>On the first successful fetch, every anchor's absolute URL found on
	 * the page is added to {@link #need_to_see}; subsequent calls skip link
	 * collection ({@link #link_once} guards it).
	 *
	 * @param the_link the URL to fetch
	 * @return the text of the page body, or the empty string if the page
	 *         could not be fetched or has no body
	 */
	protected String crawlLink(String the_link) {
		Document doc = null;
		String text = "";

		try {
			System.out.println("Page Retriever: Connecting to " + the_link);
			doc = Jsoup.connect(the_link).get();

		} catch (IOException | IllegalArgumentException ie) {
			// Best-effort crawl: report and return "" rather than propagate.
			System.err.println("Cannot connect to " + the_link);
		}

		if (doc != null) {
			// The flag is loop-invariant, so check it once instead of per anchor.
			if (!link_once) {
				for (Element link : doc.select("a")) {
					need_to_see.add(link.attr("abs:href"));
				}
			}
			link_once = true;

			if (doc.body() != null) {
				text = doc.body().text();
			}
		}

		return text;
	}

}
