<pre>
My Web Downloader

Resources followed: IMG:SRC, LINK:HREF, SCRIPT:SRC, A:HREF, AREA:HREF

Downloads contents of a web site recreating local version.
Only resources located under the main page's domain are retrieved.
Respects robots.txt.
Follows redirects.
Logs response codes for each resource and prints report (resources alphabetized and grouped by response code).
Logs content-type for each resource and prints report (resources alphabetized and grouped by content-type).
Throttle option so as not to overload web server.
Customizable USER_AGENT.
Handles cookies (required internally to keep from infinite looping).

In:
    -s = the base URL to retrieve (where to start the "spider")
    -dir = local location for storing retrieved files; if not specified, uses normalized domain name + directory path indicated in -s

1. Read contents of start page and...

a. get the src or href value for all resources
b. walk through all the A:HREF downloading each, normalizing URL and appending resource values (what?)
c. download resources to their respective locations


1. Get page
2. Append resources to resources array
    A resource is unique if it has the same path, basename, extension, and query string parameters and values
3. Append all a:hrefs to a:href array
    A href is unique if it has the same path, basename, extension, and query string parameters and values
4. Mark current page as dug
5. Save page to client
6. Pop new page from top of queue and start over
7. Get all resources

For each resource I want to record the following:
- original href value
- pages where href occurs
- HTTP status
- MIME type (value of content-type)
- bytes

TODO
- handle ../../ type links (remove them if they return 404 until we get a 200 response or all .. are gone, whichever comes first)
- update internal links to dynamic pages so they work on a flat file system
- unify references to directory, directory/, and directory/index.html so they all end up as the same document (if they are the same document)
- add option to turn off flattening of dynamic sites (currently sites are flattened by default to maintain navigation)
- add crawl statistics report a la linklint
- optimize download of supporting graphics so it doesn't take so long
- add support for base href tag
- add support for mailto hrefs (well, don't let the system puke on them)
- add a maximum URL length to minimize the possibility of LOOPS GONE WILD

</pre>
<?php
// Report all errors except notices/strict/deprecated; the script relies on
// reading possibly-unset parse_url() keys, so notices are deliberately silenced.
// NOTE(review): error_reporting(...) is the conventional call; ini_set works too.
ini_set('error_reporting', E_ALL & ~E_NOTICE & ~E_STRICT & ~E_DEPRECATED);
// phpQuery supplies the jQuery-style DOM selection used by digger::parse_page().
include_once( 'phpQuery.php' );

/*
    I return the directory portion of a path. Unlike dirname(), a path that
    ends in a separator (e.g. "a/b/") is treated as already naming a directory,
    so only the trailing separator is stripped ("a/b/" -> "a/b"). Paths without
    a trailing separator fall through to the standard dirname() behavior.
    A bare "/" or "\" (nothing before the separator) is also passed to dirname().
*/
function gtsr_dirname ( $dir )
{
    $last_char = substr( $dir, -1 );
    $ends_in_separator = ( $last_char === '/' || $last_char === '\\' );
    // Require at least one character before the separator, mirroring dirname()'s
    // handling of root paths like "/".
    if( $ends_in_separator && strlen( $dir ) > 1 ) {
        return substr( $dir, 0, -1 );
    }
    return dirname( $dir );
}

/*
    I return the file-name portion of a path. A path ending in a separator
    ("a/b/") names a directory and therefore has no basename, so '' is
    returned; anything else is delegated to the standard basename().
*/
function gtsr_basename ( $path )
{
    $last_char = substr( $path, -1 );
    if( $last_char === '/' || $last_char === '\\' ) {
        // trailing separator => directory, no file component
        return '';
    }
    return basename( $path );
}

/*
    http://www.searchtools.com/robots/robot-checklist.html
*/
/*
    Single-site crawler ("spider"): fetches pages starting from one URL,
    queues same-host links below the start path, and mirrors documents and
    supporting resources (img/script/link/css images) to the local file system.
    Requires the PECL HttpRequest class and phpQuery (included above).
    http://www.searchtools.com/robots/robot-checklist.html
*/
class digger
{
    // URLs already fetched; consulted to avoid re-crawling.
    private $crawled_urls = array();
    // FIFO queue of normalized URLs still to fetch.
    private $urls_2_crawl = array();
    // Characters replaced with '_' when a query string is folded into a file name.
    private $illegal_fs_chars = '/[^0-9a-z\-\_]/i';
    // File name substituted when a URL path ends at a directory (no basename).
    private $DirectoryIndex = 'index.html';

    /*
        local_base_path string corresponds to the path portion of the start url. HTML documents outside this path are not crawled nor downloaded.
        Note that if you use a directory without a trailing slash as the start URL, this variable will resolve to the parent of the directory and
        thus you will get both children and siblings.
    */
    private $local_base_path = '';
    
    /* local_site_dir_name string corresponds to the host name of the site being crawled. */
    private $local_site_dir_name = '';
    
    /*
        I seed the crawler from the start URL: normalize it, queue it, and derive
        the local mirror directory (Sites/<host>/<dirname-of-start-path>).
        $start string Absolute URL where crawling begins; must be non-empty.
        NOTE(review): l, start_url, start_protocol, start_host, start_path,
        start_query, local_path, curr_url, req and resources are all dynamic
        (undeclared) properties; this is deprecated as of PHP 8.2.
        NOTE(review): $url['query'] may be unset here (notice suppressed by the
        error_reporting setting at the top of the file).
    */
    public function __construct ( $start='' )
    {
        $this->l = new Log();
        $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': ' . 'Starting crawl with URL ' . $start );
        if( strlen( $start ) > 0 ) {
            $url = parse_url( $start );
            $this->start_url = $start;
            $start = $this->make_unique_url( $start );
            $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': ' . 'Start URL normalized as ' . $start );
            $this->start_protocol = $url['scheme'];
            $this->start_host = $url['host'];
            $this->start_path = $url['path'];
            $this->start_query = $url['query'];
            $this->urls_2_crawl[] = $start;
            $this->local_base_path = gtsr_dirname( $url['path'] );
            $this->local_path = dirname( __FILE__ ) . DIRECTORY_SEPARATOR . 'Sites' . DIRECTORY_SEPARATOR . $url['host'] . gtsr_dirname( $url['path'] );
            // Single HttpRequest is reused for every fetch so cookies persist.
            $this->req = new HttpRequest();
        } else {
            trigger_error ( 'The start string must not be empty' );
        }
    }
    
    /*
        I normalize a URL into a canonical comparison key:
        scheme://host/path with query parameters sorted alphabetically by name,
        and the fragment (and any port/userinfo) dropped. Two URLs that differ
        only in parameter order therefore compare equal.
        NOTE(review): a query parameter without '=' leaves $param[1] unset
        (notice suppressed); its value is recorded as empty.
    */
    public function make_unique_url ( $in )
    {
        $u = parse_url( $in );
        
        if( $u['query'] != '' ) {
            $p = '/\&/i';
            $q = preg_split ( $p, $u['query'] );
            
            $tot_qs = count( $q );
            for( $i = 0; $i < $tot_qs; $i++ ) {
                $param = explode ( '=', $q[$i] );
                $qs[$param[0]] = $param[1];
            }
            // Sort by parameter name so ordering differences don't create duplicates.
            ksort ( $qs );
            $q = http_build_query ( $qs );
        } else {
            $q = '';
        }
        
        return $u['scheme'] . '://' . $u['host'] . $u['path'] . ($q != '' ? '?' . $q : '');
    }
    
    /*
        Main crawl loop: pop URLs FIFO from the queue until it is empty.
        fetch_page() appends newly-discovered links back onto the queue,
        so the loop runs until the whole reachable subtree is crawled.
    */
    public function crawl ()
    {
        while (count( $this->urls_2_crawl ) > 0) {
			$url = array_shift( $this->urls_2_crawl );
			$this->curr_url = $url;
			$this->l->log(__FUNCTION__ . ':' . __LINE__ . ': ' . 'Crawling ' . $url );
			if( $this->can_crawl( $url ) ) {
				$this->fetch_page( $url );
				$this->crawled_urls[] = $url;
			} else {
				// log fact that this is a duplicate url or falls outside the local_base_path
				$this->l->log(__FUNCTION__ . ':' . __LINE__ . ': ' . $url . ' is either a duplicate or is outside the local_base_path.');
			}
			$this->l->log(__FUNCTION__ . ':' . __LINE__ . ': ' . 'Finished crawling ' . $url);
			$this->l->log(' ');
        }
    }
    
    /*
     * I fetch a page and process its URLs.
     * Error statuses (403/404/405/410) are dropped; 301/302 enqueue the
     * Location target instead of saving anything; text-ish content types are
     * parsed for further links before the body is saved locally; every other
     * content type is saved as-is. Response cookies are copied back onto the
     * shared request (per the header notes, required to avoid infinite loops).
     * NOTE(review): the status switch uses loose comparison against string
     * cases; '403.1' is unlikely to ever match a numeric response code.
     */
    public function fetch_page ( $url )
    {
		$this->req->setURL( $url );
		$this->req->send();
		$page = $this->req->getResponseBody();
		$status = $this->req->getResponseCode();
		$cookies = $this->req->getResponseCookies();
		$ct = $this->req->getResponseHeader('Content-Type');
		$cl = $this->req->getResponseHeader('Content-Length');
		if( is_array( $cookies ) and count( $cookies ) > 0 ) {
			// Reset then re-add so the next request carries the fresh cookie set.
			$this->req->_requestHeaders['cookie'] = null;
			$this->req->addCookies( $cookies );
		}
		$this->l->log(__FUNCTION__ . ':' . __LINE__ . ': ' . 'Response code for ' . $url . ' is ' . $status . ' and Content-type is ' . $ct);
		switch( $status ) {
			case '403':
			case '403.1':
			case '404':
			case '405':
			case '410':
				// page not found
				break;
			case '301':
			case '302':
				// add new url to urls_2_crawl since we don't know if it will result in a new redirect or not, don't save anything
				$this->append_url( $this->canonicalize_href_src( $url, $this->req->getResponseHeader('Location') ) );
				break;
			default:
				// Content-Type may carry a charset suffix ("text/html; charset=...");
				// keep only the media type for the comparison below.
				$a_ct = explode ( ';', $ct );
				$s_ct = $a_ct[0];
				switch( $s_ct ) {
					case 'text/html':
					case 'text/xml':
					case 'text/javascript':
					case 'text/css':
						$this->parse_page( $page );
						break;
					default:
						
						break;
				}
				$this->save_local( $this->get_local_file_name( $url ), $page );
				break;
		}
    }
    
    /* I return true if the url passes all "can crawl" tests. For example, the URL must not be in the list of crawled URLs. Also, the HTML document must be in the same directory tree as local_base_path. */
    public function can_crawl ( $url )
    {
    	// Protocol-relative URLs ("//host/...") are assumed to be http.
    	// NOTE(review): this ignores the start URL's actual scheme (could be https).
    	if (strpos($url, '//') === 0) {
    	    $url = 'http:' . $url;
    	}
        if( !in_array( $url, $this->crawled_urls ) ) {
            if( $this->resource_is_child( $url ) === true ) {
                $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': ' . $url . ' has not been crawled and is a child (or a non-HTML page). This URL can be crawled.' );
                return true;
            } else {
                $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': ' . $url . ' falls outside local_base_path.' );
            }
        } else {
            $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': ' . $url . ' has already been crawled.' );
        }
        return false;
    }
    
    /*
        I return true when $url is on the start host and its directory lies
        under local_base_path.
        NOTE(review): stripos(...) !== false accepts local_base_path at ANY
        position in the directory string (e.g. base '/a' would match '/x/a/b');
        a prefix test (stripos(...) === 0) looks like the intent — confirm.
    */
    public function resource_is_child ( $url )
    {
        $s = parse_url ( $this->start_url );
        $u = parse_url( $url );
        if( $s['host'] == $u['host'] ) {
            $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Resource is on the same host.');
            if( stripos( gtsr_dirname( $u['path'] ), $this->local_base_path ) !== false ) {
                $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Resource (' . $url . ') is below the start URL since ' . gtsr_dirname( $u['path'] ) . ' is a subset of ' . $this->local_base_path);
                return true;
            } else {
                $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Resource (' . $url . ') is NOT below the start URL.');
            }
        }
        return false;
    }
    
    /*
        I write $content to $path, creating any missing parent directories.
        Returns true on success, false when the file cannot be opened.
        $path string urldecoded string suitable for use on a filesystem.
    */
    public function save_local ( $path, $content )
    {
        $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Saving ' . $path . ' to file system.');
        $p = pathinfo( $path );
        if( !is_dir( $p['dirname'] ) ) {
            // Recursive mkdir so deep paths are created in one call.
            mkdir ( $p['dirname'], 0777, TRUE );
        }
        $fp = fopen ( $path, 'w' );
        if( $fp ) {
            fwrite ( $fp, $content );
            fclose ( $fp );
            return true;
        } else {
            // log an error
            return false;
        }
    }
    
    // I return the local file system path for the current document. I append the directoryindex if no file name is specified.
    // A query string is flattened into the file name ('.'-prefixed, illegal chars
    // replaced with '_', '.html' appended) so dynamic pages become distinct flat files.
    public function get_local_file_name ( $url='' )
    {
        $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Getting local file name for ' . $url );
        $u = parse_url( $url );
        if( $u['query'] != '' ) {
            $p = $this->illegal_fs_chars;
            $query = '.' . preg_replace( $p, '_', $u['query'] ) . '.html';
        } else {
            $query = '';
        }
        
        // Strip the base path so the local tree mirrors only the crawled subtree.
        $temp = substr( $u['path'], strlen( $this->local_base_path ) );
        $this_dir = gtsr_dirname( $this->local_path . $temp );
        $this_file = gtsr_basename( $temp );
        if( $this_file == '' ) {
            $this_file = $this->DirectoryIndex;
        }
        $this_file .= $query;
        $final_path = rawurldecode( $this_dir . DIRECTORY_SEPARATOR . $this_file );
        $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Local file name for ' . $url . ' is ' . $final_path );
        return $final_path;
    }
    
    /* I parse the page looking for links to other pages and resources.
       IMG/SCRIPT/FRAME contribute via src; LINK/A/AREA via href. Stylesheet
       LINKs are additionally fetched and scanned for url(...) image references.
       Every discovered resource is appended to $this->resources; A/AREA/FRAME
       hrefs are also queued for crawling via append_url().
       NOTE(review): elements missing the expected attribute make
       getNamedItem(...) return null, so ->nodeValue relies on suppressed
       notices (and is an error on PHP 8) — confirm target PHP version.
       NOTE(review): $r['parsedURL'] is assigned but never read afterward. */
    public function parse_page ( $page )
    {
        $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Parsing results of ' . $this->curr_url);
        $test = phpQuery::newDocument( $page );
        $asrc = pq('a,img,script,link,area,frame');
        $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Found ' . count ( $asrc ) . ' elements with HREF or SRC attributes.');
        foreach( $asrc->elements as $node ) {
            $r = array();
            $r['element'] = strtolower($node->localName);
            switch ( $r['element'] ) {
                case 'img':
                case 'script':
                case 'frame':
                    $URLsource = 'src';
                    break;
                default:
                    $URLsource = 'href';
                    break;
            }
            $r['parsedURL'] = parse_url( $node->attributes->getNamedItem($URLsource)->nodeValue );
            switch ( $r['element'] )
            {
                case "img":
                    $r['URL'] = $this->canonicalize_href_src( $this->curr_url, trim( $node->attributes->getNamedItem('src')->nodeValue ) );
                    $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Found an IMG element with URL ' . $r['URL']);
                    break;
                case "script":
                    // Inline scripts have no src; only external scripts are recorded.
                    if ($node->attributes->getNamedItem('src')->nodeValue != '') {
                        $r['URL'] = $this->canonicalize_href_src( $this->curr_url, trim( $node->attributes->getNamedItem('src')->nodeValue ) );
                        $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Found an SCRIPT element with URL ' . $r['URL']);
                    }
                    break;
                case "link":
                    $r['URL'] = $this->canonicalize_href_src( $this->curr_url, trim( $node->attributes->getNamedItem('href')->nodeValue ) );
                    if ( strtolower ( $node->attributes->getNamedItem('rel')->nodeValue ) == 'stylesheet' ) {
                        $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Found a STYLESHEET element with URL ' . $r['URL']);
                        // try to get images and other stylesheets
                        $s = file_get_contents ( rawurldecode($r['URL']) );
                        $p = '/url\s*\(\s*(\\\'|")?(\/|http\:\/\/[^\/]+?)?(.+?\.(jpg|gif|png|jpeg))(\\\'|")?\s*\)/ims';
                        $tot_matches = preg_match_all ( $p, $s, $matches );
                        if ($tot_matches > 0) {
                            for( $i = 0; $i < $tot_matches; $i++ ) {
                                switch( $matches[2][$i] ) {
                                    case '/':
                                        // NOTE(review): root-relative stylesheet images are
                                        // currently dropped (empty case body) — confirm intended.
                                        break;
                                    default:
                                        // NOTE(review): $matches[$i][2] looks transposed —
                                        // $matches[2][$i] (as used in the switch above) appears intended.
                                        if( strtolower( substr( $matches[$i][2], 0, 4 ) ) == 'http' ) {
                                            // what a pain in the ass
                                        } else {
                                            // make href relative to dir of current url
                                            $last_slash_pos = strripos ( $r['URL'], '/' );
                                            $ne['element'] = 'img';
                                            $ne['URL'] = substr( $r['URL'], 0, strlen( $r['URL'] ) - (strlen( $r['URL'] ) - $last_slash_pos) + 1 ) . $matches[3][$i];
                                            $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Found an element in a STYLESHEET with URL ' . $ne['URL']);
                                            $this->resources[] = $ne;
                                        }
                                        break;
                                }
                            }
                        }
                    }
                    break;
                default:
                    // normalize href (A, AREA and any other href-bearing element)
                    $r['URL'] = $this->canonicalize_href_src( $this->curr_url, trim( $node->attributes->getNamedItem('href')->nodeValue ) );
                    $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Found a A element ' . $r['URL']);
                    $this->append_url( $this->make_unique_url( $r['URL'] ) );
                    break;
            }
            
            if ($r['URL'] != '') {
                $this->resources[] = $r;
            }
        }
    }
    
    /*
        I append $url to the crawl queue unless it is ineligible (can_crawl),
        already queued, already crawled, or is the page currently being parsed.
        NOTE(review): can_crawl() already rejects crawled URLs, so the inner
        crawled_urls check is redundant but harmless.
    */
    public function append_url ( $url )
    {
        if( $this->can_crawl( $url ) ) {
            if( !in_array ( $url, $this->urls_2_crawl ) ) {
                if( !in_array ( $url, $this->crawled_urls ) ) {
                    if( $url != $this->curr_url ) {
                        $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Appending ' . $url . ' to crawl queue.');
                        $this->urls_2_crawl[] = $url;
                    }
                }
            }
        } else {
            $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Skipping ' . $url . ' because it is not elligible for crawling.');
        }
    }
    
    /* 
        I convert a site or document relative href into a full href (with scheme and host) if needed.
        $url string The url to use as the source for canonicalization.
        $href string the source requiring canonicalization.
        Returns the normalized absolute URL, or false for hrefs that cannot be
        resolved (empty, or the impossible host-without-scheme case).
        NOTE(review): the final log line prints $href AFTER it has been rebuilt,
        so "Converted X into Y" shows the rebuilt value, not the original input.
    */
    public function canonicalize_href_src ( $url, $href )
    {
        $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Making ' . $href . ' a complete URL based on ' . $url);
        $url = parse_url( $url );
        // Protocol-relative href: borrow the scheme of the referring page.
        if (strpos($href, '//') === 0) {
            $href = $url['scheme'] . ':' . $href;
        }
        $href = parse_url( $href );
        if( $href['scheme'] == '' ) {
            if( $href['host'] == '' ) {
                if( $href['path'] != '' ) {
                    $href['path'] = $this->remove_relative_refs( $href['path'] );
                    switch( $href['path'][0] ) {
                        case '/':
                            // Root-relative: host + scheme come from the referring page.
                            $href['scheme'] = $url['scheme'];
                            $href['host'] = $url['host'];
                            $href = $url['scheme'] . '://' . $url['host'] . $href['path'] . ( $href['query'] != '' ? '?' . $href['query'] : '' );
                            break;
                        default:
                            // Document-relative: resolve against the referring page's directory.
                            $href = $url['scheme'] . '://' . $url['host'] . gtsr_dirname( $url['path'] ) . '/' . $href['path'] . ( $href['query'] != '' ? '?' . $href['query'] : '' );
                            break;
                    }
                } else {
                    // maybe there's just a query string?
                    if( $href['query'] != '' ) {
                        $c = parse_url ( $this->curr_url );
                        $href = $c['scheme'] . '://' . $c['host'] . $c['path'] . '?' . $href['query'];
                    } else {
                        // maybe just a fragment?
                        if( $href['fragment'] ) {
                            // return the curr_url
                            $href = $this->curr_url;
                        } else {
                            return false;
                        }
                    }
                }
            } else {
                // this is probably an impossible place to be since parse_url fails to recognize the host when the scheme is absent
                // log an error and return
                $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Found a host (' . $href['host'] . ') but not the scheme! This is highly unusual because parse_url fails to recognize the host when the scheme is absent (it is just considered to be a part of the path).');
                return false;
            }
        } else {
            $href = $href['scheme'] . '://' . $href['host'] . $href['path'] . ( $href['query'] != '' ? '?' . $href['query'] : '' );
        }
        $final_href = $this->make_unique_url( $href );
        $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Converted ' . $href . ' into ' . $final_href);
        return $final_href;
    }
    
    /* lots of edge cases possible here, only interested in the most obvious for now.
       I recursively strip '../' segments from a path.
       NOTE(review): array_splice() RETURNS the removed tail, so $new_a_path is
       everything from the first '..' onward and all leading directories are
       discarded — '/a/b/../c' resolves to '/c' rather than '/a/c'. This matches
       the "handle ../../ type links" TODO in the header; confirm intended. */
    public function remove_relative_refs ( $path )
    {
        $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Removing relative references (../ - if needed) from ' . $path );
        $p = '/\.\.\//i';
        $tot_rel_refs = preg_match ( $p, $path, $matches );
        if( $tot_rel_refs > 0 ) {
            $a_path = explode ( '/', $path );
            if( $a_path[0] == '..' ) {
                // Leading '..': drop it and anchor the remainder at the root.
                array_shift ( $a_path );
                $path = '/' . implode ( '/', $a_path );
            } else {
                $parts = count( $a_path );
                for( $i = 0; $i < $parts; $i++ ) {
                    if( $a_path[$i+1] == '..' ) {
                        $new_a_path = array_splice ( $a_path, ($i+1) );
                        break;
                    }
                }
                $path = implode ( '/', $new_a_path );
            }
            // Recurse until no '../' segments remain.
            return $this->remove_relative_refs( $path );
        }
        return $path;
    }
    
    /*
        I download every img/link/script resource collected during parsing and
        save the ones that answer 200 to their mirrored local paths. A/AREA
        entries are skipped here (they were crawled as pages instead).
    */
    public function download_resources ()
    {
        $this->l->log('Downloading all the resources (' . count( $this->resources ) . ').');
        foreach ( $this->resources as $r ) {
            if( $r['element'] == 'img' or $r['element'] == 'link' or $r['element'] == 'script') {
                if( $this->can_crawl( $r['URL'] ) == true ) {
                    $local_path = $this->get_local_file_name( $r['URL'] );
                    $this->req->setURL( $r['URL'] );
                    $this->l->log(__FUNCTION__ . ':' . __LINE__ . ': Downloading ' . $r['URL'] . ' to ' . $local_path);
                    $this->req->send();
                    $res = $this->req->getResponseBody();
                    switch( $this->req->getResponseCode() ) {
                        case '200':
                            $this->save_local( $local_path, $res );
                            break;
                        default:
                            $this->l->log('Failed to save the resource ' . $r['URL'] . ' due to a response code of ' . $this->req->getResponseCode());
                            break;
                    }
                }
            }
        }
    }
}

/*
    Minimal logging facade used throughout the crawler. Every message is
    forwarded to PHP's error_log(), so the destination is controlled by the
    standard error_log / log_errors ini settings.
*/
class Log 
{
	// Stateless: the constructor exists only to keep the instantiation
	// pattern (`new Log()`) stable for callers.
	public function __construct()
	{
	}
	
	/*
	    Record one message line via error_log().
	    $msg string Text to log; defaults to an empty string.
	*/
	public function log($msg='')
	{
		$line = $msg;
		error_log($line);
	}
}

// CLI entry point. Expects the start URL as the second argument, matching the
// "-s <url>" usage described in the header (argv[1] is the flag, argv[2] the URL).
// NOTE(review): the flag itself is never validated and the documented -dir
// option is not parsed; a missing argv[2] falls through to digger's
// "start string must not be empty" error.
$url = $argv[2];
$d = new digger( $url );
$d->crawl();
$d->download_resources();
