<?php
namespace org\crawlgenie\modules\jobsbank;
use org\crawlgenie\application\environment\Request;
use org\crawlgenie\application\path\Path;
use org\crawlgenie\application\base\Factory;
use org\crawlgenie\application\document\Document;
use org\crawlgenie\application\filter\Filter;
use org\crawlgenie\models as Models;
use org\crawlgenie\models\dao\JobListingMetaDAO;
use org\crawlgenie\models\dao\JobListingDAO;
use org\crawlgenie\application\util\Util;
use org\crawlgenie\modules\JobsCrawlerController;

use \Logger;
use \CGException;

/**
 * Description of JobsBank Controller
 *
 * @author Jinson
 */
class JobsBankController extends JobsCrawlerController {
    // Categories requested for the current crawl run (set from the
    // "categories" request parameter in crawlJobs()).
    private $categoriesToCrawl;
    // Fixed JobsBank entry-point URLs: the homepage (visited first so session
    // cookies get set), the per-category listing endpoint (category name is
    // appended, URL-encoded), and the paginated search endpoint used for
    // listing pages beyond the first.
    private $categoriesURLReference = array('homepage' => 'https://www.jobsbank.gov.sg', 
        'category_base_url' => 'https://www.jobsbank.gov.sg/ICMSPortal/portlets/JobBankHandler/SearchResult.do?tabSelected=CATEGORY&aTabFunction=aTabFunction&Function=',
        'search_base_url' => 'https://www.jobsbank.gov.sg/ICMSPortal/portlets/JobBankHandler/SearchResult3.do');
        
    
    /**
     * Builds the controller: runs the parent's setup, acquires a
     * class-scoped logger and registers this module's valid actions.
     */
    public function __construct() {
        parent::__construct();
        $this->_logger = Logger::getLogger(__CLASS__);
        $this->_init();
    }



    /**
     * Registers the set of actions this service accepts.
     */
    protected function _init() {
        $validActions = array(
            'get_home_page',
            'crawl_jobs',
            'parse_jobs',
            'fix_empty_files',
            'remove_duplicates',
            'repository_integrity_check',
            'remove_orphaned_files',
        );
        $this->_setValidActions($validActions);
    }


    /**
     * Request entry point: dispatches to the handler matching the current
     * action. Batch-oriented actions require the "batch_id" and "author"
     * request parameters ("remove_duplicates" only needs "batch_id");
     * a missing parameter raises a CGException.
     *
     * @throws \CGException when a required request parameter is absent
     */
    public function execute() {
        parent::execute();

        $action  = $this->getCurrentAction();
        $batchID = Request::getParam('batch_id');
        $author  = Request::getParam('author');

        switch($action) {
            case 'crawl_jobs':
                $this->crawlJobs();
//                $this->_document->prepareDocument();
                break;

            case 'parse_jobs':
                if($batchID == null || $author == null) {
                    throw new \CGException('No batch_id and/or author defined for jobs parsing.');
                }
                $this->parseJobs($batchID, $author);
//                $this->_document->prepareDocument();
                break;

            case 'fix_empty_files':
                if($batchID == null || $author == null) {
                    throw new \CGException('No batch_id and/or author defined for empty file cleanup.');
                }
                self::fixEmptyFiles($batchID, $author);
                break;

            case 'remove_duplicates':
                if($batchID == null) {
                    throw new \CGException('No batch_id and/or author defined for duplicates removal.');
                }
                self::removeDuplicateFiles($batchID);
                break;

            case 'repository_integrity_check':
                if($batchID == null || $author == null) {
                    throw new \CGException('No batch_id and/or author defined for repository integrity check.');
                }
                self::verifyMetadataIntegrity($batchID, $author);
                break;

            case 'remove_orphaned_files':
                if($batchID == null || $author == null) {
                    throw new \CGException('No batch_id and/or author defined for orphaned files removal.');
                }
                self::removeOrphanedDiskFiles($batchID, $author);
                break;
        }
    }

    
   
    
    /**
     * Crawls every requested JobsBank category: loads the homepage first so
     * session cookies get set, then walks each category's paginated listing
     * and downloads every job posting not already stored for this batch.
     *
     * Fixes vs. previous revision:
     *  - the half-of-month flag used date('e'), which returns the timezone
     *    identifier (never numeric) and made the comparison meaningless;
     *    date('j') (day of the month) is now used.
     *  - exceptions are thrown with "new" ("throw \CGException($msg)" was a
     *    fatal call to an undefined function).
     *  - the pause duration is converted to seconds before being formatted.
     *
     * @throws \CGException when a listings page or a job page stays empty
     *                      after the configured number of retries
     */
    protected function crawlJobs() {
        //establish which categories to crawl
        $this->setCategoriesToCrawl(Request::getParam('categories'));
        $this->_logger->info(__METHOD__.': Categories to crawl: ' . print_r($this->getCategoriesToCrawl(), TRUE));
        
        // set our batch and instance ID. date('j') = day of month (1-31),
        // so batches split on the 15th into "<Ym>.1" and "<Ym>.2".
        $halfOfMonth = (date('j') > 15) ? 2 : 1;
        $batchID = date('Ym') . '.' . $halfOfMonth;
        $instanceID = date('YmdHis');
        $linksProcessed = 0;
        $numSkipped = 0;
        
        //now load the homepage first so any cookies required are set. 
        $ch = self::getCurlHandle($this->categoriesURLReference['homepage']); // init curl
        curl_exec($ch);

        //then for each category to crawl, load up and loop through the listing pages and open up each individual link
        foreach($this->getCategoriesToCrawl() as $cat) {
            curl_setopt($ch, CURLOPT_POST, TRUE); 
            curl_setopt($ch, CURLOPT_POSTFIELDS, array('{actionForm.checkValidRequest}' => 'YES'));
            curl_setopt($ch, CURLOPT_URL, $this->categoriesURLReference['category_base_url'] . urlencode($cat));
            $rawLists = curl_exec($ch);
            $lists = Request::isGzipContent($rawLists) ? gzdecode($rawLists) : $rawLists;
           
            $this->_logger->info(__METHOD__.': Crawling category "' . $cat . '"');
            
            //now parse this page to get hold of all the individual listing URLs, current page number, and total pages.
            $html = new \simple_html_dom();
            $html->load($lists);
            $totalPages = $html->find('input#txtPageNumber', 0)->parent()->next_sibling()->next_sibling()->innertext;

            $this->_logger->info(__METHOD__.': Total pages for this category is "' . $totalPages . '"');

            $newConnectionRepeats = 0;
            $numOfContinuousDuplicatesInCategory = 0;
            for($i=1;$i<=$totalPages;$i++) {
                usleep(rand(\CGConfig::getMinDelay(),  \CGConfig::getMaxDelay()));
                
                //for pages other than the first, we must load the new page
                if($i>1) {
                    curl_setopt($ch, CURLOPT_POST, TRUE); 
                    curl_setopt($ch, CURLOPT_POSTFIELDS, array('{actionForm.checkValidRequest}' => 'YES', 
                        '{actionForm.SPECIALISATION}'=>$cat, '{actionForm.currentPageNumber}'=>$i,
                        '{actionForm.keyWord}'=>$cat, '{actionForm.recordsPerPage}'=>5, 
                        '{actionForm.searchType}'=>'Quick Search', '{actionForm.sortBy}'=>1,
                        '{actionForm.totalPageCount}'=>$totalPages));
                    curl_setopt($ch, CURLOPT_URL, $this->categoriesURLReference['search_base_url']);
                    $rawLists = curl_exec($ch);
                    $lists = Request::isGzipContent($rawLists) ? gzdecode($rawLists) : $rawLists;

                    //now parse this page to get hold of all the individual listing URLs, current page number, and total pages.
                    $html = new \simple_html_dom();
                    $html->load($lists);                    
                    
                    // the total page count can change mid-crawl; refresh it when present
                    if(null !== $html->find('input#txtPageNumber', 0)) {
                        $totalPages = $html->find('input#txtPageNumber', 0)->parent()->next_sibling()->next_sibling()->innertext;                    
                    }
                }
                
                $this->_logger->info(__METHOD__.': Working on page "' . $i . '" of "'. $totalPages .'"');
                
                $numLinksFound = count($html->find('#searchresult td[class=jobDesActive] > a'));
                
                $this->_logger->info(__METHOD__.': Size of listings page is: "' . strlen($lists) 
                        . '", Total links for this page: "' . $numLinksFound.'"');

                // Empty listings page: back off with an increasing delay, then
                // retry; after the second failure recycle the connection once,
                // and give up entirely on the third failure.
                $repeat = 0;
                while($numLinksFound == 0) {
                    $delay = rand(\CGConfig::getMinDelay() * 3, \CGConfig::getMaxDelay() * 3 + $repeat * 3000000);
                    $formattedDelay = sprintf("%01.2f", $delay / 1000000);
                    $this->_logger->info(__METHOD__.': Pausing crawling for ' . $formattedDelay . ' seconds.'); 
                    usleep($delay);
                                       
                    $rawLists = curl_exec($ch);
                    $lists = Request::isGzipContent($rawLists) ? gzdecode($rawLists) : $rawLists;

                    //now parse this page to get hold of all the individual listing URLs, current page number, and total pages.
                    $html = new \simple_html_dom();
                    $html->load($lists);                                       
                    $numLinksFound = count($html->find('#searchresult td[class=jobDesActive] > a'));
                    
                    $this->_logger->info(__METHOD__.': Size of listings page is: "' . strlen($lists) 
                            . '", Total links for this page: "' . $numLinksFound.'"');

                    if($repeat == 1 && $numLinksFound == 0) {
                        if($newConnectionRepeats <1) {
                            //lets try closing the connection and creating a new one.
                            $this->_logger->info(__METHOD__.': Closing connection and creating a new one to try again.'); 
                            curl_close($ch);
                            $ch = self::getCurlHandle($this->categoriesURLReference['search_base_url']);
                            --$i;  // redo this page with the fresh connection
                            ++$newConnectionRepeats;
                            break;
                        }
                    }
                    elseif($repeat == 2 && $numLinksFound == 0) { 
                        $msg = 'FAILED to get valid listings content for ' . $cat . ' page "'. $i .'" after more than '.$repeat.' tries';
                        $this->_logger->info(__METHOD__.': ' . $msg);
                        throw new \CGException($msg);
                    }
                    ++$repeat;
                }

                //get all individual listing URLs
                foreach($html->find('#searchresult td[class=jobDesActive] > a') as $link) {
                    ++$linksProcessed;
                    $jobLink = $link->getAttribute('href');
                    $url = $this->categoriesURLReference['homepage'] . $jobLink;
                    preg_match('/id=(.+)$/i', $jobLink, $matches);
                    $jobID = $matches[1];
                    
                    // if the jobID has already been crawled in this batch, lets skip it.
                    if(Models\dao\JobListingMetaDAO::metaObjExists2($batchID, CG_NAME . '_' . Request::getParam('mod'), $url)) {
                        ++$numSkipped;
                        $this->_logger->info(__METHOD__.': URI exists for this batch, skipping ('.$numSkipped .' / ' 
                                . $linksProcessed . '): ' . $url); 
                        ++$numOfContinuousDuplicatesInCategory;
                        
                        // two full pages of consecutive duplicates means we've
                        // caught up with the previous crawl of this category
                        if($numOfContinuousDuplicatesInCategory >= 2 * $numLinksFound) {
                            $this->_logger->info(__METHOD__ . ': Found 2 consecutive pages worth of duplicates. Skipping category "'.$cat.'".');
                            break;
                        }
                        continue;
                    }
                    $numOfContinuousDuplicatesInCategory = 0;  // reset the continuous duplicates counter.
                    
                    //add in a random delay
                    usleep(rand(\CGConfig::getMinDelay(), \CGConfig::getMaxDelay()));
                    
                    $jch = self::getCurlHandle($url);
                    $raw = curl_exec($jch);
                    $jContent = Request::isGzipContent($raw) ? gzdecode($raw) : $raw;                    
                    $contentLength = strlen($jContent);
                    
                    // retry an empty job page a few times with growing delays
                    $repeat = 0;
                    while($contentLength == 0) {
                        usleep(rand(\CGConfig::getMinDelay(), \CGConfig::getMaxDelay() + $repeat * 1000000));
                        $raw = curl_exec($jch);
                        $jContent = Request::isGzipContent($raw) ? gzdecode($raw) : $raw;
                        $contentLength = strlen($jContent);
                        
                        if($repeat == 3) {
                            //TODO: consider adding in more logging or notification eg. through email
                            $msg = 'FAILED to get content for ' . $url . ' after more than 3 tries';
                            $this->_logger->info(__METHOD__.': ' . $msg);
                            throw new \CGException($msg);                            
                        }
                        ++$repeat;
                    }

                    $jhtml = new \simple_html_dom();
                    $jhtml->load($jContent);

                    // build and persist the metadata record for this job page
                    $jmObj = new Models\JobsbankJobListingMeta();
                    $jmObj->setBatchID($batchID);
                    $jmObj->setInstanceID($instanceID);
                    $jmObj->setTitle($jhtml->find('div.jobDes h3', 0)->innertext);
                    $jmObj->setBody($jhtml->find('div[class=container top bottom whiteBG]', 0)->innertext);
                    $jmObj->setContent($jContent);
                    $jmObj->setCrawlURI($this->categoriesURLReference['homepage'] . $jobLink);

                    preg_match('/>Posting Date:([^<]+)/i', $jContent, $matches);
                    $jmObj->setDateCreated(trim($matches[1]));
                    $jmObj->setFileSize($contentLength);

                    $this->_logger->info(__METHOD__.': JobID: "'. $jobID .'"');
                    $jmObj->setPath('../crawl_repository/crawlgenie/jobsbank/'
                            . $batchID .'/' 
                            .$instanceID . '_'. $jobID . '.html');

                    $jmObj->store();
                    
                    curl_close($jch);
                }
                
                if($numOfContinuousDuplicatesInCategory >= 2 * $numLinksFound && $numLinksFound >0) {
                    $this->_logger->info(__METHOD__ . ': Switching to next category...');
                    break;
                }
            }
            $this->_logger->info(__METHOD__.': Crawling of category "' .$cat. '" completed. Total records skipped: ' 
                    . $numSkipped . ' out of '.$linksProcessed);
        }
        curl_close($ch);
        print "<br /><br />\r\n" . 'Total records skipped: ' . $numSkipped . ' out of '.$linksProcessed . "<br /><br />\r\n";
    }

    
    
    
    /**
     * This function takes in the html content (or the html snippet for the content)
     * of the Job Listing and returns the extracted key=>values in an array.
     * 
     * @param type $content
     * @return type
     */
    /**
     * Parses a JobsBank job-description page (full HTML or the stored body
     * snippet) into a flat key => value array of job fields.
     *
     * Note: every selector below relies on the JD page structure remaining
     * constant. isset() guards were added around the regex captures so a
     * page missing its dates or listing ID no longer raises undefined-offset
     * notices.
     *
     * @param string $content raw HTML of the job listing
     * @return array parsed fields (post_date, close_date, listing_id,
     *               listing_company_name, listing_title, description,
     *               requirements, industry, category, employment_type,
     *               working_hours, shift_pattern, salaries, min/max_salary,
     *               job_level, min_years_experience, vacancies, viewed,
     *               applied, address, location, contact_email, postal_code,
     *               contact_telephone)
     * @throws \CGException when $content is empty
     */
    protected function getJobListingValues($content) {
        $return = array();
        $matches = array();
        $dtz = new \DateTimeZone('Asia/Singapore');
        
        if($content == null) {
            throw new \CGException('Empty body content sent. Please run maintenance on the repository.');
        }
        
        $html = new \simple_html_dom();
        $html->load($content);

        // "Posting Date:" and "Closing Date:" both end in "ing Date:", so the
        // first capture is the posting date and the second the closing date.
        preg_match_all('/ing Date:([^<]+)/i', $content, $matches);
        $postRaw = isset($matches[1][0]) ? trim($matches[1][0]) : '';
        $closeRaw = isset($matches[1][1]) ? trim($matches[1][1]) : '';
        // dates appear as e.g. "14-Aug-2015" — TODO confirm format is stable
        $postDate = strlen($postRaw) > 0 ? \DateTime::createFromFormat('d-M-Y', $postRaw, $dtz) : null;
        $closeDate = strlen($closeRaw) > 0 ? \DateTime::createFromFormat('d-M-Y', $closeRaw, $dtz) : null;

        $return['post_date'] = null;
        $return['close_date'] = null;

        if(!$postDate || !$closeDate) {
            // either date missing/unparsable: leave both null and flag it
            $msg = 'Unable to create date time object from posting/closing dates. Contact the administrator to review the source formats.';
            $this->_logger->error($msg);
        } else {
            // both parsed; normalise to midnight local time
            $postDate->setTime(0, 0, 0);
            $return['post_date'] = $postDate->format('Y-m-d H:i:sO');
            $closeDate->setTime(0, 0, 0);
            $return['close_date'] = $closeDate->format('Y-m-d H:i:sO');
        }

        // listing IDs look like "JOB-20..." (guard added for missing matches)
        preg_match('/(JOB-20[^<]+)/i', $content, $matches);
        $return['listing_id'] = isset($matches[1]) ? trim($matches[1]) : null;

        // company name: either inside the h3's first child, or (when the h3 is
        // empty) inside an <a> within the next sibling span.
        if(null == $html->find('div.jobDes h3', 1)->first_child()) {
            //assuming for now that it's either span or a <a href> within the span child.
            if(count($html->find('div.jobDes h3', 1)->next_sibling()->find('a')) > 0) {
                $return['listing_company_name'] = $html->find('div.jobDes h3', 1)->next_sibling()->find('a', 0)->innertext;            
            } else {
                $return['listing_company_name'] = null;
            }
        } else {
            $return['listing_company_name'] = $html->find('div.jobDes h3', 1)->first_child()->first_child()->innertext;        
        }
        
        $return['listing_title'] = $html->find('div.jobDes h3', 0)->innertext;

        // description: join <li> bullet points into one "."-separated string,
        // falling back to the stripped div text when there are no bullets.
        if(count($html->find('#divMainJobDescription li')) > 0) {
            $descLines = $html->find('#divMainJobDescription li');
            $descArray = array();
            
            foreach($descLines as $line) {
                $line = trim(strip_tags($line));
                // drop a trailing period so the implode doesn't double it
                $descArray[] = substr($line, strlen($line)-1, 1) == '.' ? substr($line, 0, strlen($line) -1) : $line;                
            }
            $return['description'] = implode('. ', $descArray);
        } else {
            $return['description'] = trim(strip_tags($html->find('#divMainJobDescription', 0)->innertext));    
        }

        // requirements: same bullet-point handling as the description
        if(count($html->find('#divMainSkillsRequired li')) > 0) {
            $reqLines = $html->find('#divMainSkillsRequired li');
            $reqArray = array();
            foreach($reqLines as $line) {
                $line = trim(strip_tags($line));
                $reqArray[] = substr($line, strlen($line)-1, 1) == '.' ? substr($line, 0, strlen($line) -1) : $line;
            }
            $return['requirements'] = implode('. ', $reqArray);    
        } else {
            $return['requirements'] = trim(strip_tags($html->find('#divMainSkillsRequired', 0)->innertext));
        }

        $return['industry'] = trim(strip_tags($html->find('div.jd_contentRight', 1)->first_child()->next_sibling()->first_child()->next_sibling()->innertext));

        // multi-valued fields below are joined with ";"
        $categories = $html->find('div.jd_contentRight', 1)->first_child()->first_child()->next_sibling()->find('li span');

        $categoriesArray = array();
        foreach($categories as $category) {
            $categoriesArray[] = $category->innertext;
        }
        $return['category'] = implode(';', $categoriesArray);

        $employment_types = $html->find('h3.jd_header3', 3)->next_sibling()->find('span.text');
        $employmentTypesArray = array();
        foreach($employment_types as $emp) {
            $employmentTypesArray[] = $emp->innertext;
        }
        $return['employment_type'] = implode(';', $employmentTypesArray);

        $working_hours = $html->find('h3.jd_header3', 4)->next_sibling()->find('span.text');
        $workHoursArray = array();
        foreach($working_hours as $wh) {
            $workHoursArray[] = trim($wh->innertext);
        }
        $return['working_hours'] = strlen(implode(';',$workHoursArray)) > 1? implode(';',$workHoursArray) : null;

        $shift_patterns = $html->find('h3.jd_header3', 5)->next_sibling()->find('span.text');
        $shiftPatternsArray = array();
        foreach($shift_patterns as $sp) {
            $shiftPatternsArray[] = trim($sp->innertext);
        }
        $return['shift_pattern'] = strlen(implode(';',$shiftPatternsArray)) > 1 ? implode(';',$shiftPatternsArray) : null;

        $salaries = $html->find('h3.jd_header3', 6)->next_sibling()->find('span.text');
        $salariesArray = array();
        foreach($salaries as $sal) {
            $salariesArray[] = strip_tags($sal->innertext);
        }
        $return['salaries'] = implode(';', $salariesArray);
        // salary range appears as "S$x,xxx - S$y,yyy"; strip thousands commas
        if(preg_match('/\d+/i', $return['salaries'])) {
            preg_match('/S\$([\d,\.]+)\s*-\s*S\$([\d,\.]+)/i', strip_tags($return['salaries']), $matches);
            $return['min_salary'] = isset($matches[1]) ? (float) str_replace(',','',$matches[1]) : null;
            $return['max_salary'] = isset($matches[2]) ? (float) str_replace(',','',$matches[2]): null;
        } else {
            $return['min_salary'] = null;
            $return['max_salary'] = null;
        }

        $jobLevels = $html->find('h3.jd_header3', 7)->next_sibling()->find('span.text');
        $jobLevelsArray = array();
        foreach($jobLevels as $jl) {
            $jobLevelsArray[] = strip_tags($jl->innertext);
        }
        $return['job_level'] = implode(';', $jobLevelsArray);

        $return['min_years_experience'] = (int) trim($html->find('h3.jd_header3', 8)->next_sibling()->find('li', 0)->innertext);

        $return['vacancies'] = (int) trim($html->find('span.jd_header3 text', 1)->parent()->next_sibling()->innertext);

        $return['viewed'] = (int) trim($html->find('span.jd_header3 text', 2)->parent()->next_sibling()->innertext);

        $return['applied'] = (int) trim($html->find('span.jd_header3 text', 3)->parent()->next_sibling()->innertext);
        
        $return['address'] = trim(strip_tags($html->find('div.jd_contentRight', 0)->first_child()->first_child()->next_sibling()->innertext));

        $utilObj = new Util();
        $return['location'] = $utilObj->getCountriesFromAddress($return['address']);       

        // contact block: pull an email address out first, then treat the
        // first run of digits in what's left as the telephone number
        $contactDetails = trim(strip_tags($html->find('div.jd_contentRight', 0)->first_child()->next_sibling()->first_child()->next_sibling()->innertext));
        preg_match('/([a-z0-9_]+|[a-z0-9_]+\.[a-z0-9_]+)@(([a-z0-9]|[a-z0-9]+\.[a-z0-9]+)+\.([a-z]{2,4}))/i', 
                $contactDetails, $matches);
        $return['contact_email'] = isset($matches[0]) ? $matches[0] : null;

        // postal code: trailing run of 3+ digits in the address — presumably
        // a Singapore 6-digit code; TODO confirm against sample addresses
        preg_match('/\s+(\d{3,})/i', $return['address'], $matches);
        $return['postal_code'] = isset($matches[1]) ? $matches[1] : null;

        preg_match('/([+\d]*\s*\d+)/i', str_replace($return['contact_email'],'',$contactDetails), $matches);
        $return['contact_telephone'] = isset($matches[1]) ? $matches[1] : null;

        return $return;
    }
    
 
    
    
    /**
     * This function parses the JobListingMeta content and 
     * then creates and stores a JobListing object.
     * 
     * @param type $batchID
     * @param type $author
     */
    /**
     * Parses every unprocessed JobListingMeta record for a batch and stores
     * the resulting JobListing objects. Large batches (>= 500 rows) are
     * worked through in pages; the offset stays 0 because processed rows
     * drop out of the "unprocessed" result set on the next query.
     *
     * Fixes vs. previous revision: the processed counter now reflects rows
     * actually handled (it previously jumped by the page size, overstating
     * the total on a short final page), and an empty fetch breaks the loop
     * so a stale row count can no longer spin forever.
     *
     * NOTE(review): the paged branch parses $row->body while the small-batch
     * branch parses $row->content — kept as-is, but confirm which field is
     * the intended parse source; they look inconsistent.
     *
     * @param type $batchID batch identifier, e.g. "201508.1"
     * @param type $author  crawl author/module identifier
     */
    protected function parseJobs($batchID, $author) {
        $db = Factory::getDBConn();
        $limit = 500;

        $numRows = JobListingDAO::getNumUnProcessedRecords($batchID, $author);
        $rowsProcessed = 0;

        if($numRows >= $limit) {
            while($rowsProcessed < $numRows) {            
                $rows = JobListingDAO::getAllUnProcessedRows($batchID, $author, $limit, 0);
                if(count($rows) == 0) {
                    break;  // nothing left to process — avoid an endless loop
                }
                
                foreach($rows as $row) {
                    $msg = 'Processing and storing JobListing, metadata_id: "'.$row->id.'"';
                    $this->_logger->info(__METHOD__ . ': ' . $msg);
                    
                    $parsedArray = $this->getJobListingValues($row->body);
                    $jobListing = new Models\JobListing();
                    $jobListing->setMetadataID($row->id);
                    $jobListing->load($parsedArray);
                    $jobListing->store();
                    ++$rowsProcessed;
                }
            }
            
        } else {
            
            $rows = JobListingDAO::getAllUnProcessedRows($batchID, $author);
            foreach($rows as $row) {
                $msg = 'Processing and storing JobListing, metadata_id: "'.$row->id.'"';
                $this->_logger->info(__METHOD__ . ': ' . $msg);
                
                $parsedArray = $this->getJobListingValues($row->content);

                $jobListing = new Models\JobListing();
                $jobListing->setMetadataID($row->id);
                $jobListing->load($parsedArray);
                $jobListing->store();
                ++$rowsProcessed;
            }
        }
        
        $msg = 'Total jobs parsed: "'. $rowsProcessed . '" out of "'.$numRows.'"';
        $this->_logger->info(__METHOD__. ': ' . $msg);
        print $msg;  // future will add to document object
    }
    
    
    
    
    
    /**
     * Runs the full repository maintenance pass for a crawl batch:
     * removes duplicate files, removes disk files without metadata rows,
     * re-fetches (or purges) empty files, and finally verifies that every
     * metadata row still points at a valid disk file.
     *
     * NOTE(review): the call order looks deliberate (cleanup before the
     * integrity check) — confirm before reordering.
     *
     * @param type $batchID batch identifier, e.g. "201508.1"
     * @param type $author  crawl author/module identifier
     */
    protected function runMaintenance($batchID, $author) {
        self::removeDuplicateFiles($batchID);
        self::removeOrphanedDiskFiles($batchID, $author);
        self::fixEmptyFiles($batchID, $author);
        self::verifyMetadataIntegrity($batchID, $author);
    }
    

    /**
     * This method is meant to be invoked to clean up the repository
     * by looking for files with empty content and redownloading them or deleting 
     * them from the repository if they are no longer valid.
     * 
     * @param type $batchID
     * @param type $author
     */
    /**
     * Repository cleanup: finds metadata rows whose file_size is 0,
     * re-downloads the content (with retries), rewrites the disk file and
     * metadata row on success, or deletes both when the URI stays empty.
     *
     * Fixes vs. previous revision: the retry-exhausted branch used
     * $this->_logger inside this static method (a fatal "using $this when
     * not in object context" error) and reported "3 tries" although the
     * loop allows 6 retries.
     *
     * @param type $batchID batch identifier, e.g. "201508.1"
     * @param type $author  crawl author/module identifier
     * @throws CGException when the refreshed content cannot be written to disk
     */
    public static function fixEmptyFiles($batchID, $author) {
        $db = Factory::getDBConn();

        // lets first look for empty files.        
        $query = "select * from "._CG_REPOS_METADATA_TABLE." where "
                ."batch_id = ? and author=? and file_size=0";
        $queryValues = array($batchID, $author);

        Logger::getLogger(__CLASS__)->info(__METHOD__.': Repository cleanup empty files query: ' . print_r($query, true));
        Logger::getLogger(__CLASS__)->info(__METHOD__.': Repository cleanup empty files values: ' . print_r($queryValues, true));

        $db->preparedQuery($query, $queryValues);
        $rows = $db->getResults();

        foreach($rows as $row) {
            krumo($row); // debug dump — TODO remove once maintenance is stable
            usleep(rand(1500000, 3000000));  // delay 1.5 to 3 seconds

            $ch = self::getCurlHandle($row->crawl_uri);
            $raw = curl_exec($ch);
            $content = Request::isGzipContent($raw) ? gzdecode($raw) : $raw;
            $contentLength = strlen($content);
            
            // retry with a growing delay; give up after 6 retries
            $repeat = 0;
            while($contentLength == 0) {
                usleep(rand(2000000 + $repeat * 500000, 2000000 + $repeat * 1000000));
                $raw = curl_exec($ch);
                $content = Request::isGzipContent($raw) ? gzdecode($raw) : $raw;
                $contentLength = strlen($content);

                if($repeat == 6) { 
                    // static context: must use the class logger, not $this
                    Logger::getLogger(__CLASS__)->info(__METHOD__.': FAILED to get content for ' . $row->crawl_uri . ' after more than 6 tries. Removing from repository and metadata store.');
                    break;
                }
                ++$repeat;
            }
            
            if($contentLength >0) {
                
                //lets save the new content to the disk store 
                if(file_put_contents($row->path, $content) === FALSE) {
                   Logger::getLogger(__CLASS__)->error(__METHOD__ . ': Unable to write to ' . $row->path);
                   throw new CGException(__METHOD__ . ': Unable to write to ' . $row->path);                   
                }
                
                preg_match('/>Posting Date:([^<]+)/i', $content, $matches);
                $dateCreated = trim($matches[1]);
                $characterSet = mb_detect_encoding($content, 'auto');
                $lastModified = date('Y-m-d H:i:sO');

                $jhtml = new \simple_html_dom();
                $jhtml->load($content);
                $title = $jhtml->find('div.jobDes h3', 0)->innertext;
                $body = $jhtml->find('div[class=container top bottom whiteBG]', 0)->innertext;
                
                //update the metadata repository
                $query = "update "._CG_REPOS_METADATA_TABLE." set date_created=?, character_set=?, "
                        ."file_size=?, last_modified=?, title=?, body=? where batch_id = ? and author=? and crawl_uri=?";
                $queryValues = array($dateCreated, $characterSet, $contentLength, $lastModified, 
                    $title, $body, $batchID, $author, $row->crawl_uri);

                Logger::getLogger(__CLASS__)->info(__METHOD__.': Repository cleanup empty files query: ' . print_r($query, true));
                Logger::getLogger(__CLASS__)->info(__METHOD__.': Repository cleanup empty files values: ' . print_r($queryValues, true));

                $db->preparedQuery($query, $queryValues);
                
            } else {
                //delete the file from repository as well as metadata store.
                unlink($row->path);
                $query = "delete from "._CG_REPOS_METADATA_TABLE." where batch_id = ? and author=? and crawl_uri=?";
                $queryValues = array($batchID, $author, $row->crawl_uri);

                $db->preparedQuery($query, $queryValues);
                Logger::getLogger(__CLASS__)->info(__METHOD__.': Deleted file ' . $row->path. ' from repository and metadata record store.');                
            }
                        
        }
    }


    /**
     * Returns an ordered map of jobs to filepaths multi-dimensional array
     * 
     * 
     * @param type $path
     * @return int
     */
    /**
     * Scans a batch directory and returns a map of jobID => array with keys
     * 'jobID', 'filenames' (all files for that job) and 'count'.
     *
     * Filenames follow "<instanceID>_<jobID>.html": the job ID is taken as
     * the 16 characters immediately before the 5-character ".html" suffix
     * (hence the fixed offsets below). TODO confirm job IDs are always
     * exactly 16 characters.
     *
     * Fix vs. previous revision: scandir() returning FALSE (missing or
     * unreadable path) is now handled instead of being iterated.
     *
     * @param type $path directory to scan
     * @return array map keyed by job ID; empty when the path cannot be read
     */
    public static function getMappedJobFilesArray($path) {
        $map = array();
        $filenames = scandir($path);
        if($filenames === FALSE) {
            Logger::getLogger(__CLASS__)->error(__METHOD__ . ': Unable to scan directory ' . $path);
            return $map;
        }
        
        foreach($filenames as $filename) {
            if($filename == '.' || $filename == '..') {
                continue;                
            }
            
            // jobID = 16 chars located 21 chars from the end (16 + ".html")
            $jobID = substr($filename, strlen($filename)-21, 16);

            if(array_key_exists($jobID, $map)) {
                $map[$jobID]['filenames'][] = $filename;
                ++$map[$jobID]['count'];   
            } else {
                $map[$jobID]['jobID'] = $jobID;
                $map[$jobID]['filenames'][] = $filename;
                $map[$jobID]['count'] = 1;                    
            }
        }
        
        // intentionally unsorted: sorting by count would discard the jobID keys
        return $map;
    }

    
    /**
     * Removes duplicate crawled files for a batch: for every job ID with
     * more than one file on disk, deletes all but the newest copy (filenames
     * start with the YmdHis instance ID, so a plain sort orders oldest
     * first) along with the matching metadata rows.
     *
     * Fix vs. previous revision: the delete used "path like '%?'" — a "?"
     * inside a quoted SQL literal is never treated as a bind placeholder,
     * so the statement never matched the intended rows. The whole LIKE
     * pattern is now bound as a single parameter.
     *
     * @param type $batchID batch identifier, e.g. "201508.1"
     */
    public static function removeDuplicateFiles($batchID) {
        $db = Factory::getDBConn();
        $path = '../crawl_repository/crawlgenie/jobsbank/' . $batchID;
        $map = self::getMappedJobFilesArray($path);        

        $workingArray = array();
        $k = 0;
        //pick out those with counts > 1
        foreach($map as $mapElement) {
            if($mapElement['count'] > 1) {
                $workingArray[$k] = $mapElement;
                //sort the filenames subarray so the newest file is last
                sort($workingArray[$k]['filenames']);
                                
                //delete every file except the latest
                for($s=0; $s<count($workingArray[$k]['filenames'])-1;$s++) {
                    $f = $workingArray[$k]['filenames'][$s];
                    // bind the full pattern as one value; placeholders do not
                    // work inside quoted literals
                    $query = "delete from " . _CG_REPOS_METADATA_TABLE . " where path like ?";
                    $db->preparedQuery($query, array('%' . $f));
                    unlink($path . '/' . $f);
                }
                
                ++$k;                
            }
        }
        krumo($workingArray); // debug dump — TODO remove once maintenance is stable
        
    }
    
    
    /**
     * Goes through each metadata row for the batch/author in pages of 1000
     * records and invokes ensureValidDBPaths() to check that every row's
     * path field points to a valid disk file. If not, that method looks for
     * existing jobID files on disk and updates the path; failing that, the
     * row is deleted.
     * 
     * @param type $batchID batch directory name under the crawl repository
     * @param type $author  author the metadata rows were stored under
     */
    public static function verifyMetadataIntegrity($batchID, $author) {
        $db = Factory::getDBConn();

        $path = '../crawl_repository/crawlgenie/jobsbank/' . Path::getCleanPath($batchID);
        $diskFileMap = self::getMappedJobFilesArray($path);
        $queryValues = array($batchID, $author);
        
        $query = "select count(*) from " . _CG_REPOS_METADATA_TABLE . " where batch_id=? and author=?";
        $db->preparedQuery($query, $queryValues);
        $numRows = (int) $db->getVar();
       
        // Page through the rows in fixed-size chunks. A single loop also
        // covers the small (<= 1000 rows) case, which removes the previous
        // duplicated if/else branches that ran the same query twice.
        $pageSize = 1000;
        for($offset = 0; $offset < $numRows; $offset += $pageSize) {
            $query = "select * from " . _CG_REPOS_METADATA_TABLE
                    . " where batch_id=? and author=? order by id"
                    . " limit " . $pageSize . " offset " . $offset;
            
            $db->preparedQuery($query, $queryValues);
            $rows = $db->getResults();
            self::ensureValidDBPaths($rows, $diskFileMap);
        }
    }
    
    
    /**
     * Checks each row in the rowset to ensure that its path field points to
     * a valid disk file. If the file is missing, the row's jobID is looked
     * up in $diskFileMap and the path repaired from the first filename found
     * there; failing that, the row is deleted from the metadata table.
     * 
     * @param type $rowset      rows from _CG_REPOS_METADATA_TABLE (uses the
     *                          crawl_uri, path and id fields)
     * @param type $diskFileMap map of jobID => array('filenames' => [...]),
     *                          as built by getMappedJobFilesArray()
     */
    public static function ensureValidDBPaths($rowset, $diskFileMap) {
        $db = Factory::getDBConn();
        // BUGFIX: this is a static method, so $this->_logger would be a
        // fatal error — obtain a logger locally instead.
        $logger = Logger::getLogger(__CLASS__);
        
        //first check that each jobID in the db points to an existing file on disk.
        foreach($rowset as $row) {
            // crawl_uri is expected to end in "...id=<jobID>"; skip rows
            // that do not match rather than reading an undefined capture.
            if(!preg_match('/id=(.+)$/i', $row->crawl_uri, $matches)) {
                continue;
            }
            $jobID = $matches[1];
                        
            if(!file_exists($row->path)) {
                //if the file does not exist, search for the jobID in the $diskFileMap.
                if(array_key_exists($jobID, $diskFileMap)) {
                    // BUGFIX: the map is keyed by jobID and stores candidate
                    // filenames in the 'filenames' subarray —
                    // $diskFileMap['filename'] was always undefined.
                    $newFilename = $diskFileMap[$jobID]['filenames'][0];
                    //update the db record's path with the correct one.
                    $newPath = preg_replace('/(.+\/)(.+\.html$)/i', '$1', $row->path) . $newFilename;
                    $query = "update " . _CG_REPOS_METADATA_TABLE . ' set path =? where id=?';
                    $db->preparedQuery($query, array($newPath, $row->id));
                    $logger->info(__METHOD__.': Updating row id "' . $row->id . '" path from '.$row->path.' to ' . $newPath); 
                    
                } else {
                    //if cannot find we can either try to obtain the resource again or just delete the row
                    
                    //deleting the row for simplicity's sake
                    $query = "delete from " . _CG_REPOS_METADATA_TABLE . ' where id=?';
                    $db->preparedQuery($query, array($row->id));
                    $logger->info(__METHOD__.': Deleted row id "' . $row->id . '" as the jobID cannot be found on disk.'); 
                    
                }
            }
            
        }
    }
    
    /**
     * This method should be run AFTER the removeDuplicateFiles method.
     * It checks for the existence of each file's path within the
     * repos_metadata table. If not found, the file is deleted from disk.
     * Progress is logged roughly five times over the run.
     * 
     * Note: This method can be very slow depending on the filesystem. 
     * 
     * @param type $batchID batch directory name under the crawl repository
     * @param type $author  author the metadata rows were stored under
     */
    public static function removeOrphanedDiskFiles($batchID, $author) {
        $db = Factory::getDBConn();
        // BUGFIX: this is a static method, so $this->_logger would be a
        // fatal error — obtain a logger locally instead.
        $logger = Logger::getLogger(__CLASS__);
        $path = '../crawl_repository/crawlgenie/jobsbank/' . Path::getCleanPath($batchID);
        $diskFileMap = self::getMappedJobFilesArray($path);
        $totalFiles = count($diskFileMap);
        // guard against a division by zero when the directory is empty
        $intervals = $totalFiles > 0 ? $totalFiles / 5 : 1;
        $currentInterval = 1;
        $x = 1;
        foreach($diskFileMap as $key => $val) {
            $filePath = $path . '/' . $val['filenames'][0];
            
            // BUGFIX: use >= instead of == — $intervals is a float, so the
            // counter would rarely land exactly on $currentInterval.
            if($x >= $currentInterval) {
                $logger->info(__METHOD__.': Processing ' . $x .' of '. $totalFiles);
                $currentInterval += $intervals;
            }
            // BUGFIX: the increment was previously inside the progress
            // branch, so $x stopped advancing after the first log line and
            // no further progress was ever reported.
            ++$x;
            
            $query = "select count(*) from " . _CG_REPOS_METADATA_TABLE . " where batch_id=? and author=? and path=?";
            $db->preparedQuery($query, array($batchID, $author, $filePath));
            // BUGFIX: the query yields a scalar count — fetch it with
            // getVar() (as done elsewhere in this class) instead of
            // numerically comparing the row object from getRow().
            $count = (int) $db->getVar();
            
            if($count < 1) {
                //if does not exist, delete the file.
                $logger->info($filePath.' not found within metadata. Deleting.'); 
                unlink($filePath);
            }
        }
    }
    
    
    
    
    
    /**
     * Builds and returns a new preconfigured cURL handle for fetching the
     * given URL through the configured network proxy.
     * 
     * @param type $url the URL the handle will request
     * @return resource configured cURL handle
     */
    private static function getCurlHandle($url) {
        $uriComponents = parse_url($url);

        // browser-like request headers; Host is derived from the target URL
        $requestHeaders = array(
            'Host: ' . $uriComponents['host'],
            'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language: en-US,en;q=0.5',
            'Accept-Encoding: gzip, deflate',
            'Connection: keep-alive');

        $ch = curl_init(); // init curl
        // apply every option in a single call rather than repeated curl_setopt()s
        curl_setopt_array($ch, array(
            CURLOPT_PROXY          => \CGConfig::$networkProxy,
            CURLOPT_URL            => $url,
            CURLOPT_HEADER         => 0,     // exclude headers from the output
            CURLOPT_RETURNTRANSFER => true,  // return the page body as a string
            CURLOPT_TIMEOUT        => 200,   // http request timeout in seconds
            CURLOPT_FOLLOWLOCATION => true,  // follow redirects, needed if the url changes
            CURLOPT_MAXREDIRS      => 2,     // cap redirect chains
            CURLOPT_USERAGENT      => "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0",
            CURLOPT_COOKIEFILE     => NULL,  // keep our cookies in memory
            CURLOPT_HTTPHEADER     => $requestHeaders,
            CURLOPT_AUTOREFERER    => TRUE,
            CURLOPT_SSL_VERIFYPEER => false, // false for https
            CURLOPT_VERBOSE        => true));
        
        return $ch;
    }    
    
    /**
     * Returns the categories queued for crawling, as set by
     * setCategoriesToCrawl() — an array, or null if never set.
     * 
     * @return array|null
     */
    public function getCategoriesToCrawl() {
        return $this->categoriesToCrawl;
    }

    /**
     * Sets the categories to crawl from a comma-separated string. A single
     * category (no comma present) results in a one-element array.
     * 
     * @param type $categoriesToCrawl comma-separated category list
     */
    public function setCategoriesToCrawl($categoriesToCrawl) {
        // explode() already returns a one-element array when the delimiter
        // is absent, so the previous stristr() containment check was
        // redundant (and stristr — a case-insensitive substring search —
        // was the wrong tool for a containment test anyway).
        $this->categoriesToCrawl = explode(',', $categoriesToCrawl);
    }



}
?>