#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Time-stamp: <2010-03-09 03:29:03 Tuesday by qingant>

import HTMLParser
import urllib, urllib2
import sys
import thread, time, Queue

# Number of worker threads; the shared queue holds up to 5x that many URL batches.
__MAX_SIZE = 10
# Bounded queue of URL batches (lists of URLs) awaiting download by workers.
__DATA_QUEUE = Queue.Queue(__MAX_SIZE*5)
mutex = thread.allocate_lock()       # guards queue puts and the __URLS_* bookkeeping below
printmutex = thread.allocate_lock()  # serializes console output across threads
def safeprint(*arg):
    printmutex.acquire()
    print arg
    printmutex.release()
__URLS_PUSHED = set()      # URLs already handed to workers (de-duplication set)
__URLS_NOT_PUSHED = []     # backlog: URL batches waiting for queue space
__WORKER_STATE = {}        # worker_num -> True while downloading, False while idle
def push_task(urls):
    """Queue the not-yet-seen URLs from *urls* for the worker threads.

    Filters *urls* against __URLS_PUSHED, then tries a non-blocking put of
    the new batch onto __DATA_QUEUE.  If the queue is full the batch is
    saved to the __URLS_NOT_PUSHED backlog for balanser() to retry later,
    instead of being silently dropped.  On success the batch is recorded in
    __URLS_PUSHED so the same pages are not downloaded again.

    Fixes vs. the original: the filter list comprehension is computed once
    (was: twice); a successful put no longer ALSO appends to the backlog
    (was: every batch got re-queued by balanser and crawled twice); a full
    queue no longer loses the batch; the lock is released via finally.
    """
    mutex.acquire()
    try:
        new_urls = [url for url in urls if url not in __URLS_PUSHED]
        if not new_urls:
            return
        try:
            __DATA_QUEUE.put(new_urls, block=False)
        except Queue.Full:
            # No room right now -- keep the batch for the balancer thread.
            __URLS_NOT_PUSHED.append(new_urls)
        else:
            __URLS_PUSHED.update(new_urls)
    finally:
        mutex.release()

# def get_task():
#     mutex.acquire()
def worker(worker_num, dowloader):
    """Worker thread loop: pull URL batches off the queue and download them.

    Flags itself busy/idle in __WORKER_STATE under its own slot, sleeps
    briefly when the queue is empty, and feeds every link discovered by the
    downloader back through push_task().  Runs forever.
    """
    while True:
        try:
            batch = __DATA_QUEUE.get(block=False)
        except Queue.Empty:
            # Nothing to do: mark idle and back off before polling again.
            __WORKER_STATE[worker_num] = False
            time.sleep(3)
            continue
        __WORKER_STATE[worker_num] = True
        for link in batch:
            push_task(dowloader(link))

def balanser():
    while True:
        print "balancer"
        time.sleep(1)
        tasks_num = len(__URLS_NOT_PUSHED)
        if tasks_num == 0:
            continue
        mutex.acquire()
        safeprint(tasks_num,__DATA_QUEUE.qsize(),__WORKER_STATE)
        for i in range(tasks_num):
            try :
                temp = __URLS_NOT_PUSHED.pop()
                __URLS_PUSHED.update(set(temp))

                __DATA_QUEUE.put(temp, block = False)

            except Queue.Full:
                break
        mutex.release()

def dowloader_naive(url):
    """Download one page and return the list of link hrefs found in it.

    URLs that do not contain "http" are treated as paths relative to the
    module-global ``base_url`` (set in __main__ -- NOTE(review): calling
    this before __main__ runs would raise NameError, which the broad
    except below would swallow into an empty result).

    Returns [] on any failure, logging the exception via safeprint so a
    single bad page never kills a worker thread.

    Fixes vs. the original: the urlopen connection is explicitly closed
    (was leaked), and the parser is close()d to flush any buffered input.
    """
    try:
        if "http" not in url:
            target = base_url + url
        else:
            target = url
        conn = urllib.urlopen(target)
        try:
            page = conn.read()
        finally:
            conn.close()

        safeprint("%s     downloaded"%url)
        parser = parseLinks()
        parser.feed(page)
        parser.close()  # flush any data the parser buffered
        return parser.urls
    except Exception as e:
        safeprint(e,"from downloader_naive",url)
        return []


class parseLinks(HTMLParser.HTMLParser):
    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        self.urls = []
    def handle_starttag(self, tag, attrs):
        if tag == "a":
            for name, value in attrs:
                if name == "href":
                    self.urls.append(value)
if __name__ == '__main__':
    print "begin"
    # Root used by dowloader_naive to resolve relative links.
    base_url = "http://localhost/"
    # Seed the backlog with the start page; balanser feeds it to the workers.
    __URLS_NOT_PUSHED.append(["http://localhost/index_new.html"])
    # Spawn the download workers as raw threads (daemon-like: they die with
    # the process, since the main thread never exits balanser below).
    for i in range(__MAX_SIZE):
        thread.start_new_thread(worker, (i, dowloader_naive))
        #thread.start_new_thread(balanser,())
    # Run the balancer loop in the main thread; it never returns.
    balanser()

        #thread.wait()
#def worker
#def worker

# class site:
#     """  """
#     def __init__(self, base_url, catch_depth = 3):
#         self.base_url = base_url
#         self.catch_depth = catch_depth
#         self.tasks = {}
#         self.workers = None
#     def _get_all_urls(url):
#         """get all urls in the page identified by url"""
#         url_parser = parseLinks()

# lParser = parseLinks()
# url = "http://se.633wyt.com/List_feizhuliu/3387407_50_1.html"
# url = "http://localhost/"
# request = urllib2.Request(url)
# #request.add_header('Range', 'bytes=%d-%d' %self.headerrange)
# #request.set_proxy('127.0.0.1:1234', "sock")
# conn = urllib2.urlopen(request)

# s = conn.read()
# #print s

# lParser.feed(s)

# lParser.close()
