/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */

package LinkRetriever;

import DBSupport.DBApi;
import General.Utilities;
import General.Globals;
import General.*;

/**
 *
 * @author Manuel
 */
/**
 * Crawls a single web site on its own thread and stores the discovered
 * project links through the supplied {@link DBApi}.
 * <p>
 * Typical usage is via the static factory {@link #crawlWebSite}, which
 * constructs the manager and starts the thread.
 */
public class SiteManager extends Thread {

    /** Root URL of the site being crawled. */
    private final String siteURL;
    /** Tracks visited and still-pending links for this crawl. */
    private final ListManager mgr;
    /** Site classification constant from {@link Globals}; stays 0 when the host matches neither known pattern. */
    private int type;
    /** File name associated with this crawl (stored but not used in this class). */
    private final String fileName;
    /** Parses fetched pages and feeds newly discovered links back into {@link #mgr}. */
    private final SiteParser parser;
    /** Database access used to persist the gathered project links. */
    private final DBApi dbapi;

    /**
     * Creates a crawler for the given site and seeds the link queue with it.
     *
     * @param site root URL of the site to crawl
     * @param file file name associated with the crawl
     * @param api  database API used to store the discovered links
     */
    public SiteManager(String site, String file, DBApi api) {
        mgr = new ListManager();
        parser = new SiteParser(mgr);
        siteURL = site;
        fileName = file;
        mgr.insertIfNotVisited(site);
        dbapi = api;
        if (site.contains("google")) {
            type = Globals.SITE_TYPE_GOOGLE;
        } else if (site.contains("sourceforge")) {
            type = Globals.SITE_TYPE_SOURCEFORGE;
        }
        // NOTE(review): any other host leaves `type` at its default (0) —
        // confirm downstream code tolerates an unclassified site type.
    }

    /**
     * Starts crawling the given site on a new thread.
     *
     * @param siteURL root URL of the site to crawl
     * @param file    file name associated with the crawl
     * @param a       database API used to store the discovered links
     * @return the already-started crawler thread, so callers may join() on it
     */
    public static Thread crawlWebSite(String siteURL, String file, DBApi a) {
        // the following has to be done for every sitemap indexed
        // TODO add flexibility by indexing sitemap from robots.txt
        // TODO check whether file is old, otherwise don't recompute
        SiteManager m = new SiteManager(siteURL, file, a);
        m.start();
        return m;
    }

    /**
     * Crawl loop: visits every pending link (throttled to roughly one request
     * per 100 ms), then stores all gathered project links in the database.
     * Stops early if the thread is interrupted.
     */
    @Override
    public void run() {
        System.out.println("Crawling " + this.siteURL + " ...");
        while (mgr.getLinksLeft() > 0) {
            try {
                String next = mgr.visitNext();
                Thread.sleep(100); // throttle: be polite to the remote server
                parser.parse(next);
            } catch (InterruptedException ex) {
                // BUG FIX: the original broad catch swallowed interruption,
                // making the thread unstoppable. Restore the interrupt flag
                // and stop crawling promptly.
                Thread.currentThread().interrupt();
                break;
            } catch (Exception ex) {
                // BUG FIX: the original printed no detail at all, making
                // failures impossible to diagnose. Keep going with the
                // remaining links but report why this one failed.
                System.out.println("Could not extract links: " + ex);
            }
        }
        dbapi.insertNewLinks(mgr.getProjectLinks());
        System.out.println("Finished. Gathered " + mgr.getNoOfProjects()
                + " links from " + this.siteURL);
        Utilities.cleanMemory();
    }

}
