package crawler;

import java.io.File;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

import org.htmlparser.Parser;
import org.htmlparser.filters.CssSelectorNodeFilter;
import org.htmlparser.filters.TagNameFilter;
import org.htmlparser.tags.FormTag;
import org.htmlparser.tags.LinkTag;
import org.htmlparser.tags.Span;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;

import utility.Serializer;
import work.BusyException;
import work.Job;
import work.threadPool.ThreadPool;
import data.stock.NewsStory;

public class NewsCrawler {

	//If you have Cornell credentials....
	private static final String NET_ID = "";
	private static final String PASSWORD = "";

	/** Number of worker threads used to fetch index pages and documents in parallel. */
	private static final int THREAD_COUNT = 8;

	/** Results per Proquest index page; must match the PageSize query parameter in getUrl(). */
	private static final int PAGE_SIZE = 10;

	/**
	 * Perform a parse of all news entries for a given stock within a time-frame.
	 * Walks backwards in time: from beginMonth/beginYear down through January/endYear.
	 * Any exception aborts the whole crawl (logged to stderr, not rethrown).
	 * @param beginMonth the month to begin with (1-12)
	 * @param beginYear  the year to begin with (most recent)
	 * @param endYear	 the year to end with (oldest; must be &lt;= beginYear)
	 * @param ticker	 the ticker of the stock
	 * @param searchTerm the search term to use (URL-encoded)
	 * @param directory  the directory to output results
	 * @param serializer the serializer object for persisting parses
	 */
	public static void crawlStock(int beginMonth, int beginYear, int endYear, String ticker,
			String searchTerm, String directory, Serializer serializer) {

		try {
			// Local copy so the parameter is not mutated between year iterations.
			int startMonth = beginMonth;
			for(int year = beginYear; year >= endYear; year--) {
				//Do the parse
				System.out.println("Beginning parse...");

				//Loop for every month, counting down to January
				for(int month = startMonth; month > 0; month--) {
					parseMonth(month, year, searchTerm,
							directory, ticker, serializer);
				}

				System.out.println("Operation complete");

				// Every year after the first starts from December.
				startMonth = 12;
			}
		} catch(Exception e) {
			e.printStackTrace();
			System.err.println("SOMETHING HAPPENED!");
		}
	}

	/**
	 * Go to the Cornell Proxy for Proquest and authenticate login.
	 * Fetches the proxy login form, extracts its action URL, and posts the
	 * configured NET_ID/PASSWORD credentials through the given browser.
	 * @param browser the VirtualBrowser to use (session state is kept in it)
	 * @throws ParserException if the login page cannot be parsed
	 */
	private static void authenticate(VirtualBrowser browser) throws ParserException {
		System.out.println("Authenticating proquest credentials...");
		Parser firstPage = new Parser(browser.request(false, "http://resolver.library.cornell.edu/umia/akx9769", null, null));

		NodeList formNodes = firstPage.parse(new TagNameFilter("form"));
		FormTag formTag = (FormTag) formNodes.elementAt(0);
		// The form action is relative; resolve it against the login host and
		// percent-encode any spaces so the URL is valid.
		String action = "https://web2.login.cornell.edu/" + formTag.getFormLocation();
		action = action.replaceAll(" ", "%20");

		HashMap<String,String> login = new HashMap<String,String>();
		login.put("netid", NET_ID);
		login.put("password", PASSWORD);
		browser.request(true, action, action, login);
		System.out.println("Login complete.");
	}

	/**
	 * Parse an individual month's worth of news stories (up to 1000).
	 * Fetches the first results page, fans out the remaining index pages and
	 * document fetches across a thread pool, then serializes the collected
	 * stories to {@code directory/ticker/year-month.news}. The pool and
	 * browser are always torn down, even on failure.
	 * @param month 	 the month index (1-12)
	 * @param year		 the year
	 * @param searchTerm the term to search (url formatted, please)
	 * @param directory  where to place the serialized objects
	 * @param ticker	 the ticker symbol
	 * @param serializer the serializer to use to persist objects
	 * @throws ParserException if a page cannot be parsed
	 * @throws BusyException   if the thread pool rejects work
	 */
	@SuppressWarnings("unchecked")
	private static void parseMonth(int month, int year, String searchTerm,
			String directory, String ticker, Serializer serializer) throws ParserException, BusyException {
		System.out.println("Processing month: " + month + "/" + year);

		VirtualBrowser browser = new VirtualBrowser();
		ThreadPool jobRunner = null;
		try {
			authenticate(browser);

			//Create our parallelized job runner
			jobRunner = new ThreadPool(THREAD_COUNT);
			jobRunner.putSharedData("browser", browser);
			ArrayList<String> linkUrls = new ArrayList<String>();

			String body = browser.request(false, getUrl(searchTerm, month, year), null, null);

			//Determine the count of the results
			NodeList countNodes = (new Parser(body)).extractAllNodesThatMatch(new CssSelectorNodeFilter("#resultCount td.textMedium span.bold"));
			Span countSpan = (Span) countNodes.elementAt(0);
			int count;
			try {
				// NOTE(review): assumes the count renders as a bare integer
				// (no thousands separators) — verify against a live page.
				count = Integer.parseInt(countSpan.getStringText());
			} catch(NullPointerException e) {
				// No count element on the page: treat as zero results.
				count = 0;
			} catch(NumberFormatException e) {
				// Count text was not a plain integer: treat as zero results
				// rather than aborting the whole month.
				count = 0;
			}
			System.out.println("Found " + count + " results.");

			if(count > 0) {

				//Parse the links from the first page
				linkUrls.addAll(getLinksFromIndex(body));

				//If there are more pages, distribute the work
				if(count > PAGE_SIZE) {
					System.out.println("Distributing index page parses...");

					// Grab any pagination link as a URL template, then strip its
					// firstIndex so each job can append its own offset.
					NodeList navNodes = (new Parser(body)).extractAllNodesThatMatch(new CssSelectorNodeFilter("#pageNavLine a"));
					LinkTag link = (LinkTag) navNodes.elementAt(0);
					String linkUrl = "http://proquest.umi.com.proxy.library.cornell.edu" + link.getLink();
					linkUrl = linkUrl.replace("&firstIndex=" + PAGE_SIZE, "");
					jobRunner.putSharedData("url", linkUrl);
					Job indexPageJob = new IndexPageJob();

					// Page 0 was parsed above. The last extra page holding any
					// results is (count-1)/PAGE_SIZE — the old bound of
					// count/PAGE_SIZE requested one empty page whenever count
					// was an exact multiple of PAGE_SIZE. Cap at 99 extra pages
					// since Proquest serves at most 1000 results.
					for(int page = 1; page <= (count - 1) / PAGE_SIZE && page < 99; page++)
						jobRunner.addJob(indexPageJob, Integer.valueOf(page));

					List<Object> results = jobRunner.dispatch();

					System.out.println("Compiling results...");

					for(Object result : results) {
						List<String> resultList = (List<String>) result;
						if(resultList != null)
							linkUrls.addAll(resultList);
					}
				}

				System.out.println("Successfully gathered " + linkUrls.size() + " links.");

				System.out.println("Distributing links...");

				// Fan out one job per document link; failed fetches come back null.
				Job documentJob = new DocumentJob();
				for(String linkUrl : linkUrls)
					jobRunner.addJob(documentJob, linkUrl);
				List<Object> results = jobRunner.dispatch();

				List<NewsStory> stories = new ArrayList<NewsStory>();
				System.out.println("All links processed.");
				for(Object result : results) {
					if(result != null) {
						NewsStory story = (NewsStory) result;
						stories.add(story);
					}
				}
				System.out.println("Gathered " + stories.size() + " news stories.");

				System.out.println("Serializing list of news stories.");
				File topDir = new File(directory);
				File stockDir = new File(topDir, ticker);
				if(!stockDir.isDirectory())
					stockDir.mkdirs();  // mkdirs (not mkdir) so a missing top-level directory is created too
				File monthFile = new File(stockDir, year+"-"+month+".news");
				serializer.writeObject((Serializable) stories, monthFile);
			}
		} finally {
			// Always release the worker threads and browser session, even when
			// a parse or dispatch fails part-way through the month.
			if(jobRunner != null)
				jobRunner.killThreads();
			browser.close();
		}
	}

	/**
	 * Get the url for the search results for a given query.
	 * Builds a Proquest advanced-search URL covering the whole given month
	 * (day 01 through 31; Proquest tolerates out-of-range end days).
	 * @param searchTerm the search query (already URL-encoded)
	 * @param month		 the month to search (1-12)
	 * @param year		 the year to search
	 * @return			 a properly formatted URL
	 */
	private static String getUrl(String searchTerm, int month, int year) {
		// Zero-pad the month to two digits as the date parameters require.
		String monthString;
		if(month < 10)
			monthString = "0" + month;
		else
			monthString = "" + month;

		// NOTE(review): the TS parameter is a hard-coded session timestamp —
		// confirm Proquest still accepts a stale value.
		return "http://proquest.umi.com.proxy.library.cornell.edu/pqdweb?" +
			"RQT=" + "512" +
			"&SQ=" + searchTerm +
			"&querySyntax=" + "PQ" +
			"&searchInterface=" + "1" +
			"&SrchMode=" + "2" +
			"&TS=" + "1264480875" +
			"&moreOptState=" + "CLOSED" +
			"&clientId=" + "8424" +
			"&FO=" + "CITABS" +
			"&fromDate=" + monthString + "%2F01%2F" + year +
			"&toDate=" + monthString + "%2F31%2F" + year +
			"&ShowFT=" + "1" +
			"&revType=" + "review" +
			"&revPos=" + "all" +
			"&STYPE=" + "newspapers" +
			"&sortby=" + "REVERSE_CHRON" +
			"&PageSize=" + PAGE_SIZE +
			"&h_pub_title=&h_pub_title1=&h_pub_title2=&h_pub_title3=&h_pub_title4=&h_pub_title5=&h_pub_title6=&h_pub_title7=&h_pmid=&h_pmid1=&h_pmid2=&h_pmid3=&h_pmid4=&h_pmid5=&h_pmid6=&h_pmid7=&OP1=AND&SQ1=&FO1=CITABS&OP2=AND&SQ2=&FO2=CITABS&OP3=AND&SQ3=&FO3=CITABS&OP4=AND&SQ4=&FO4=CITABS&OP5=AND&SQ5=&FO5=CITABS&OP6=AND&SQ6=&FO6=CITABS&DBId=-1&date=RANGE&onDate=&beforeDate=&afterDate=&Oppubtitle=AND&pubtitle=&Opsubject=AND&subject=&Opcompany=AND&company=&Opname=AND&name=&Opgeo=AND&geo=&Opcc=AND&cc=&Opsic=AND&sic=&OpSF=AND&SF=any&OpAT=AND&AT=any";
	}

	/**
	 * Parse an index page and return a list of all the links to documents.
	 * Selects every bold anchor inside the results list and resolves it
	 * against the Cornell proxy host.
	 * @param body the body html of the page
	 * @return	   a list of absolute document urls (empty if none found)
	 * @throws ParserException if the html cannot be parsed
	 */
	public static List<String> getLinksFromIndex(String body) throws ParserException {
		ArrayList<String> linkUrls = new ArrayList<String>();
		NodeList linkNodes = (new Parser(body)).extractAllNodesThatMatch(new CssSelectorNodeFilter("#results a.bold"));
		for(int i = 0; i < linkNodes.size(); i++) {
			LinkTag link = (LinkTag) linkNodes.elementAt(i);
			String linkUrl = "http://proquest.umi.com.proxy.library.cornell.edu" + link.getLink();
			linkUrls.add(linkUrl);
		}
		return linkUrls;
	}

}
