#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'yuzhou'
# Python Network Programming Cookbook, recipe 6.7 example: search for a
# string on a given page and on the pages it links to.

import argparse
import sys
import httplib
import re

# URLs already visited, so the recursive crawl never fetches a page twice.
processed = []


def search_links(url, depth, search):
    print(url, depth, search)
    # Process http links that are not processed yet
    url_is_processed = (url in processed)
    if (url.startswith("http://")) and (not url_is_processed):
        processed.append(url)
        url = host = url.replace("http://", "", 1)
        path = "/"

        urlparts = url.split("/")
        if len(urlparts) > 1:
            host = urlparts[0]
            path = url.replace(host, "", 1)
        print "Crawling URL path:%s%s " % (host, path)
        conn = httplib.HTTPConnection(host)
        req = conn.request("GET", path)
        print "request sent"
        result = conn.getresponse()
        if result.status == 200:
            # print "check content type"
            content_type = result.getheader("Content-Type")
            if type(content_type).__name__ == 'str' and content_type.startswith("text/html"):
                print "reading content from result"
                contents = result.read()
                # print type(contents), contents
                all_links = re.findall('href="(.*?)"', contents)
                print "Find %d links from %s%s" % (len(all_links), host, path)

                if search in contents:
                    print "Found " + search + " at " + url

                print " === level %d: processing %d links" %(depth, len(all_links))
                for href in all_links:
                    if href.startswith("/"):
                        href = "http://" + host + href
                    if depth > 0:
                        search_links(href, depth-1, search)
            else:
                print "content type: %s is not supported to search" % content_type
            # for href in all_links:
            #     if href.endswith(".pdf"):
            #         print "Downloading file: %s " % href
        else:
            print "Request error: %d" % result.status
    else:
        print "Skipping link: %s ..." % url


if __name__ == '__main__':
    print 'Begin now'
    parser = argparse.ArgumentParser(description='Webpage link crawler')
    parser.add_argument('--url', action='store', dest='url', required=True)
    parser.add_argument('--query', action='store', dest='query', required=True)
    parser.add_argument('--depth', action='store', dest='depth', default=2)

    given_args = parser.parse_args()
    print given_args
    try:
        search_links(given_args.url, given_args.depth, given_args.query)
    except KeyboardInterrupt:
        print "Aborting search by user request"
