package be.destin.harvesting;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import javax.servlet.http.HttpServletRequest;

import org.apache.log4j.Logger;

import be.destin.beans.Reflector;
import be.destin.skos.core.Concept;
import be.destin.skos.core.NoScheme;
import be.destin.skos.core.SchemeUsage;
import be.destin.skos.core.SchemeUsageList;
import be.destin.skos.core.SkosManager;
import be.destin.skos.core.TermList;
import be.destin.skos.html.SkosHtmlManager;
import edu.uci.ics.crawler4j.crawler.CrawlConfig;
import edu.uci.ics.crawler4j.crawler.CrawlController;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer;
import edu.uci.ics.crawler4j.url.URLCanonicalizer;

/**
 * Drives a crawler4j crawl that harvests concept-usage counts for one application.
 *
 * <p>For every concept of every harvested scheme, a seed URL is built from the
 * usage's harvest template; seeds sharing the same canonical URL (doc id) are
 * grouped so one fetch can serve several concept/usage pairs. The crawl writes
 * its results to {@code <configDir>/<application>/crawlinprogress.txt}, which is
 * renamed to a timestamped file under {@code <configDir>/usage} on success.
 */
public class UsageCrawlerController {
	/** log4j category */
	private static Logger log = Logger.getLogger(UsageCrawlerController.class);

	/**
	 * Prepares and runs the (blocking) harvest crawl for {@code application}.
	 * Returns immediately when the application has no harvested usage, and
	 * aborts without crawling if the cache file cannot be created or an
	 * invalid doc id is returned by crawler4j.
	 *
	 * @param skosHtmlManager manager giving access to the SKOS data, the
	 *        application root and (optionally) the current servlet request
	 * @param application name of the application whose usage is harvested
	 * @throws Exception propagated from crawler4j controller setup or the
	 *         crawl itself
	 */
	public UsageCrawlerController(SkosHtmlManager skosHtmlManager, String application) throws Exception {
		String applicationRoot = skosHtmlManager.getApplicationRoot(application);
		SkosManager skosManager = skosHtmlManager.getSkosManager();
		String configDir = skosManager.getConfigDirectory();

		SchemeUsageList usageList = skosManager.getHarvestedUsage(application);
		if (usageList == null || usageList.isEmpty()) return;

		int numberOfCrawlers = 1;
		CrawlConfig config = new CrawlConfig();
		config.setCrawlStorageFolder(configDir + File.separator + application);
		config.setMaxDepthOfCrawling(0); // fetch the seeds only, follow no links
		config.setPolitenessDelay(100); // 100 milliseconds between queries
		// config.setResumableCrawling(true);
		config.setUserAgentString("ASKOSI");
		config.setIncludeHttpsPages(true);

		/*
		 * Instantiate the controller for this crawl.
		 */
		log.info(config.toString());
		PageFetcher pageFetcher = new PageFetcher(config);
		RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
		RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
		CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);

		/*
		 * For each crawl, you need to add some seed urls. These are the first
		 * URLs that are fetched and then the crawler starts following links
		 * which are found in these pages
		 */
		Map<Integer, List<UsageCrawlerItem>> harvestItems = new HashMap<Integer, List<UsageCrawlerItem>>();
		int nbLink = 0;
		int nbURL = 0;
		for (SchemeUsage aUsage : usageList) {
			log.info("Preparing harvesting for scheme " + aUsage.getInScheme().getAbout() + ", role=" + aUsage.getRole());
			String harvestTemplate = aUsage.getHarvestURL();
			String harvestConditionTemplate = aUsage.getHarvestCondition();
			if (harvestConditionTemplate != null) harvestConditionTemplate = harvestConditionTemplate.trim();
			Object[] beans = new Object[2];
			NoScheme currScheme = aUsage.getInScheme();
			if (currScheme != null) {
				Collection<Concept> toQuery = currScheme.getAllConcepts();
				if (toQuery != null) {
					for (Concept aConcept : toQuery) {
						beans[0] = aConcept;
						beans[1] = aConcept.getInScheme();
						// An empty harvest condition means "harvest everything";
						// otherwise the substituted condition must be non-blank.
						String condition;
						if (harvestConditionTemplate == null || harvestConditionTemplate.isEmpty()) {
							condition = "OK";
						} else {
							condition = Reflector.substitute(aConcept, harvestConditionTemplate, TermList.anyLanguage).trim();
						}
						if (!condition.isEmpty()) {
							String url = Reflector.substituteURL(applicationRoot, beans, harvestTemplate, TermList.anyLanguage);
							// Validate BEFORE seeding so invalid URLs are never
							// handed to the controller (the original seeded first).
							String canonicalUrl = URLCanonicalizer.getCanonicalURL(url);
							if (canonicalUrl == null) {
								log.error("Invalid seed URL: " + url);
								continue;
							}
							// addSeed must precede getDocId so crawler4j has
							// assigned a doc id to this canonical URL.
							controller.addSeed(url);
							int docId = controller.getDocIdServer().getDocId(canonicalUrl);
							if (docId < 0) {
								log.error("INVALID docId received from crawl4j: harvesting impossible!");
								return;
							}
							List<UsageCrawlerItem> res = harvestItems.get(docId); // We may have multiple counts at the same URL: useful to treat them in one fetch!
							if (res == null) {
								res = new LinkedList<UsageCrawlerItem>();
								harvestItems.put(docId, res);
								nbURL++;
								log.info("New docId=" + docId + " for " + url);
							} else log.info("Adding to docId=" + docId + " for " + url);
							res.add(new UsageCrawlerItem(aConcept, aUsage));
							nbLink++;
						}
					}
				}
			}
		}
		log.info(nbURL + "/" + nbLink + " URL/links to explore");
		if (nbURL > 0) {
			UsageCrawlerData ucd = new UsageCrawlerData();
			ucd.harvestItems = harvestItems;
			HttpServletRequest req = skosHtmlManager.getRequest();
			if (req != null) {
				ucd.servletContext = req.getServletContext();
				ucd.trace(nbURL + "/" + nbLink + " URL/links exploration BEGIN");
			}
			try {
				ucd.cacheFile = new File(configDir + File.separator + application + File.separator + "crawlinprogress.txt");
				ucd.cacheWriter = new BufferedWriter(new FileWriter(ucd.cacheFile));
			} catch (IOException ioe) {
				log.error(ucd.cacheFile.getAbsolutePath(), ioe);
				// cacheWriter is null when the FileWriter constructor itself
				// failed; guard against NPE masking the original error.
				if (ucd.cacheWriter != null) {
					try {
						ucd.cacheWriter.close();
					} catch (IOException ignored) {
						// best-effort cleanup; the original failure is already logged
					}
				}
				return; // DO NOT CONTINUE!
			}
			controller.setCustomData(ucd);
			/*
			 * Start the crawl. This is a blocking operation, meaning that your code
			 * will reach the line after this only when crawling is finished.
			 */
			try {
				controller.start(UsageCrawler.class, numberOfCrawlers);
			} finally {
				// Always release the cache file, even if the crawl aborted.
				try {
					ucd.cacheWriter.close();
				} catch (IOException ignored) {
					// closing a fully-written (or abandoned) cache is best-effort
				}
			}
			if (!ucd.fatalError) {
				ucd.trace(nbURL + "/" + nbLink + " URL/links exploration ends without error.");
				// (1) get today's date
				Date today = Calendar.getInstance().getTime();

				// (2) create our date "formatter" (the date format we want)
				SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMdd-HHmmss");

				// (3) create a new String using the date format we want
				String fileName = formatter.format(today);
				// e.g.20090906-082323"
				File dest = new File(configDir + File.separator + "usage" + File.separator + application + fileName + ".txt");
				// renameTo reports failure via its return value; don't ignore it.
				if (!ucd.cacheFile.renameTo(dest)) {
					log.error("Could not rename " + ucd.cacheFile.getAbsolutePath() + " to " + dest.getAbsolutePath());
				}
			}
		}
	}
}
