package Crawler;

/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */

import java.io.File;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;

import DBSupport.DBApi;
import General.*;
import LinkRetriever.DataManager;
import LinkRetriever.*;
import MetricsRetriever.*;
/**
 *
 * @author Manuel
 */
public class Scheduler {

    /** Comma/keyword list naming the repositories to crawl ("google", "sourceforge"). */
    private final String repos;
    /** Threads crawling repository sites for project links. */
    private final List<Thread> linkCrawlerThreads;
    /** Threads retrieving per-project information from the collected links. */
    private final List<Thread> projectCrawlerThreads;
    /** Shared database facade handed to every crawler thread. */
    private final DBApi dbSupport;

    /**
     * Creates a scheduler for the given repositories.
     *
     * @param repo  string naming the repositories to crawl; matched via {@code contains}
     * @param limit crawl limit as a decimal string; half of it is stored in
     *              {@code Globals.crawlLimit}
     * @throws NumberFormatException if {@code limit} is not a parseable integer
     */
    public Scheduler(String repo, String limit) {
        // Parse straight to a primitive int — no need to box into an Integer.
        Globals.crawlLimit = Integer.parseInt(limit) / 2;
        repos = repo;
        linkCrawlerThreads = new ArrayList<Thread>();
        projectCrawlerThreads = new ArrayList<Thread>();
        dbSupport = new DBApi();
    }

    /** Runs the full crawl: first link retrieval, then project information retrieval. */
    public void startCrawler() {
        this.startLinkRetriever();
        this.startInformationRetriever();
    }

    /**
     * Starts one crawler thread per configured repository site and blocks until
     * all of them have finished.
     */
    public void startLinkRetriever() {
        if (repos.contains("google")) {
            linkCrawlerThreads.add(SiteManager.crawlWebSite(Globals.GOOGLE_SITE, Globals.FILE_LINKS[0], dbSupport));
        }
        if (repos.contains("sourceforge")) {
            linkCrawlerThreads.add(SiteManager.crawlWebSite(Globals.SOURCEFORGE_SITE, Globals.FILE_LINKS[1], dbSupport));
        }
        // Add other site crawlers here, keeping the FILE_LINKS index in step.
        joinAll(linkCrawlerThreads);
    }

    /**
     * Parses the collected link files, then runs {@code Globals.threadNumber}
     * retrieval threads to completion and prints a summary of the run.
     */
    public void startInformationRetriever() {
        // Eliminate old output files before producing new results.
        Utilities.writeToFile(Utilities.getCurrentTime() + ";", Globals.projectFile, false);
        FileManager.removeDir(new File(Globals.DATA_DIRECTORY));

        // First parse the files to get a set of links.
        DataManager datamgr = new DataManager(dbSupport);
        datamgr.parseProjectLinks(Globals.FILE_LINKS);

        // Start several worker threads, each consuming links until none remain.
        for (int i = 0; i < Globals.threadNumber; i++) {
            RetrievalManager retr = new RetrievalManager(datamgr, dbSupport);
            retr.start();
            projectCrawlerThreads.add(retr);
        }
        joinAll(projectCrawlerThreads);

        long timePassedMin =
                (Calendar.getInstance().getTimeInMillis() - RetrievalManager.startDate) / (1000 * 60);
        // NOTE(review): the last figure reuses noFailedCode — the "COULD NOT RETRIEVE SVN DATA"
        // line probably should report a dedicated counter; confirm against RetrievalManager.
        System.out.println("\nTIME:  " + timePassedMin + " min"
                + "\nSUCCESS: " + RetrievalManager.noSuccess
                + "\nNOT JAVA: " + RetrievalManager.noFailedContext
                + "\nNO BYTECODE: " + RetrievalManager.noFailedArchieveLink
                + "\nCOULD NOT RETRIEVE BYTECODE METRICS: " + RetrievalManager.noFailedCode
                + "\nNO SVN: " + RetrievalManager.noFailedSVNLink
                + "\nCOULD NOT RETRIEVE SVN DATA: " + RetrievalManager.noFailedCode);
    }

    /**
     * Joins every thread in the list, restoring this thread's interrupt status
     * (instead of swallowing it) if the wait is interrupted.
     */
    private void joinAll(List<Thread> threads) {
        for (int i = 0; i < threads.size(); i++) {
            try {
                threads.get(i).join();
            } catch (InterruptedException ex) {
                // Preserve the interrupt so callers/outer loops can observe it.
                Thread.currentThread().interrupt();
                System.out.println("Crawler Thread " + i + " crashed");
            }
        }
    }

}