'''
Created on Jul 5, 2010

@author: v-grwang
'''


import re
import urllib2
import urlparse
import threading
import Queue
import time
from URLPicker import UrlPicker



# URL-filter configuration consumed by UrlPicker: substrings a URL must
# contain / must not contain.  Empty lists mean "no restriction".
IncludingList = []
ExcludingList = []

# Frontier of URLs still to be fetched, seeded with the start page.
tocrawl = set(["http://sh.ganji.com/fang1/"])
# URLs already fetched, so pages are not downloaded twice.
crawled = set([])

# Extract the href value from <a> tags, accepting single or double quotes.
# BUG FIX: the original class [\'|"] also matched a literal '|' character;
# the pattern is now a raw string with the correct ['"] class on both sides.
linkregex = re.compile(r'<a\s*href=[\'"](.*?)[\'"].*?>')

class CrawlResult():
    """Outcome of a single crawl attempt.

    Attributes:
        IsDownloaded: True when the page was fetched successfully.
        NewURLs: list of newly discovered URLs (empty when none).
        NoNewURL: set by callers to flag that the frontier was empty.
    """

    def __init__(self, isDownloaded=False, newURLs=None):
        # BUG FIX: defaults added — Crawler.run() constructs CrawlResult()
        # with no arguments when the frontier is empty, which previously
        # raised TypeError.  None is used (not a mutable default list) so
        # instances never share one NewURLs object.
        self.IsDownloaded = isDownloaded
        self.NewURLs = [] if newURLs is None else newURLs
        self.NoNewURL = False

class Crawler(threading.Thread):
    UrlLimiter = UrlPicker(IncludingList, ExcludingList) 
    
    def __init__(self, queue):
        self.Queue = queue
    
    def run(self):
        try:
            crawling = tocrawl.pop()
            print "Downloading - " + crawling
        except:
            self.result = CrawlResult()
        
        
        try:
            response = urllib2.urlopen(crawling)
        except:
            pass
        
    
        msg = response.read()
    
        links = linkregex.findall(msg)
        crawled.add(crawling)
        for link in (links.pop(0) for _ in xrange(len(links))):
            if link.startswith('/'):
                link = 'http://' + url[1] + link
            elif link.startswith('#'):
                link = 'http://' + url[1] + url[2] + link
            elif not link.startswith('http'):
                link = 'http://' + url[1] + '/' + link
            if link not in crawled:
                print "New Link: " + link
                tocrawl.add(link)

for x in range(3):
    try:
        crawling = tocrawl.pop()
        print crawling
    except KeyError:
        raise StopIteration
    
    url = urlparse.urlparse(crawling)
    try:
        response = urllib2.urlopen(crawling)
    except:
        continue
    
    msg = response.read()
    
    links = linkregex.findall(msg)
    crawled.add(crawling)
    for link in (links.pop(0) for _ in xrange(len(links))):
        if link.startswith('/'):
            link = 'http://' + url[1] + link
        elif link.startswith('#'):
            link = 'http://' + url[1] + url[2] + link
        elif not link.startswith('http'):
            link = 'http://' + url[1] + '/' + link
        if link not in crawled:
            print "New Link: " + link
            tocrawl.add(link)
