#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Time-stamp: <2010-04-06 17:04:52 Tuesday by qingant>
# Standard library
import HTMLParser
import os
import pipes
import Queue
import re
import socket
import sys
import thread
import time
import urllib
import urllib2
import urlparse

# Third-party
import pycurl
import socks
# Number of worker threads; the shared task queue holds 5 tasks per worker.
__MAX_SIZE = 10
__DATA_QUEUE = Queue.Queue(__MAX_SIZE*5)
# Guards all shared url bookkeeping (queue pushes, pushed/not-pushed state).
mutex = thread.allocate_lock()
# Serialises console output across threads (used by safeprint).
printmutex = thread.allocate_lock()
def safeprint(*arg):
    """Wrapper of default print , which is thread-safe"""
    printmutex.acquire()
    print arg
    printmutex.release()
# Urls that have already been queued for download (de-duplication set).
__URLS_PUSHED = set()
# Tasks that did not fit into __DATA_QUEUE; balanser re-queues them later.
__URLS_NOT_PUSHED = []
# worker id -> False when idle, otherwise size of the task being processed.
__WORKER_STATE = {}
def push_task(urls, refer):
    """Queue newly discovered urls as download tasks.

    Every url in *urls* not already in ``__URLS_PUSHED`` becomes a
    ``([url], refer)`` task on ``__DATA_QUEUE``.  Tasks that do not fit
    (queue full) are parked on ``__URLS_NOT_PUSHED`` so ``balanser``
    can re-queue them later.  The whole operation runs under ``mutex``.
    """
    mutex.acquire()
    try:
        candidates = [([url], refer) for url in urls
                      if url not in __URLS_PUSHED]
        for index, task in enumerate(candidates):
            try:
                __DATA_QUEUE.put(task, block=False)
                __URLS_PUSHED.update(set(task[0]))
            except Queue.Full:
                safeprint("except no urls")
                # Park only the tasks that were NOT queued.  The original
                # re-parked *every* candidate, so urls already put on the
                # queue got scheduled a second time by balanser.
                __URLS_NOT_PUSHED.extend(candidates[index:])
                break
    finally:
        # The original only released the mutex on the normal path /
        # Queue.Full; any other exception left it held forever.
        mutex.release()

# def get_task():
#     mutex.acquire()
def worker(worker_num, dowloader):
    """Download worker loop.

    Forever: pull one ``(urls, refer)`` task off the shared queue,
    fetch each url with *dowloader* and feed the links it discovers
    back in via ``push_task``.  Publishes its activity (False when
    idle, task size otherwise) in ``__WORKER_STATE[worker_num]``.
    """
    while True:
        time.sleep(1)
        try:
            task_urls, task_refer = __DATA_QUEUE.get(block=False)
        except Queue.Empty:
            # Nothing to do right now; advertise idleness and retry.
            __WORKER_STATE[worker_num] = False
            continue
        __WORKER_STATE[worker_num] = len(task_urls)
        for task_url in task_urls:
            found_urls, found_refer = dowloader(task_url, task_refer)
            push_task(found_urls, found_refer)

def balanser():
    """Re-scheduler loop (runs forever in the main thread).

    Once a second, prints a short status line and moves tasks parked on
    ``__URLS_NOT_PUSHED`` back onto ``__DATA_QUEUE`` for as long as the
    queue has room.  All list/queue manipulation runs under ``mutex``.
    """
    while True:
        time.sleep(1)
        tasks_num = len(__URLS_NOT_PUSHED)
        safeprint("balancer")
        safeprint("tasknumber:  ", tasks_num, "queue size :  ",
                  __DATA_QUEUE.qsize(), __WORKER_STATE)

        if tasks_num == 0:
            continue
        mutex.acquire()
        try:
            for _ in range(tasks_num):
                task = __URLS_NOT_PUSHED.pop()
                urls, refer = task
                if not urls:
                    continue
                try:
                    __DATA_QUEUE.put((urls, refer), block=False)
                except Queue.Full:
                    # Queue filled up mid-drain: put the task back.
                    # The original dropped it here (and had already
                    # marked its urls as pushed), losing it forever.
                    __URLS_NOT_PUSHED.append(task)
                    break
                # Mark as pushed only AFTER the put succeeded.
                __URLS_PUSHED.update(set(urls))
        finally:
            # Release on every path; the original leaked the mutex on
            # any exception other than Queue.Full.
            mutex.release()

def dowloader_naive(url, refer):
    """Download *url* through a local SOCKS5 proxy and scrape it for links.

    Relative urls (no "http") are resolved against the directory of
    *refer*, the page they were found on.  The resource is fetched into
    the current directory with ``tsocks wget``, read back, and scanned
    for ``href="...html"`` page links and ``file="...jpg"`` image links.

    Returns ``(found_urls, refer_for_them)``.  On any failure returns
    ``([], None)`` — the original returned a bare ``[]`` from its
    exception path, which crashed worker's 2-tuple unpack.
    """
    try:
        # Route all socket traffic through the local SOCKS5 proxy.
        socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 1234)
        socket.socket = socks.socksocket

        if not "http" in url:
            # Relative link: resolve against the referrer's directory.
            lst = refer.split("/")[:-1]
            base_url = "/".join(lst) + "/"
            url = base_url + url
            safeprint("laf", url)

        if "jpg" in url:
            safeprint("####################################################################")

        # NOTE(security): url comes from scraped pages (untrusted input);
        # quote it so shell metacharacters cannot inject commands.
        os.system("tsocks wget " + pipes.quote(url))
        name = url.split("/")[-1]
        safeprint(name)
        safeprint("-----------------------------------------")

        if ".jpg" in name:
            # Binary image: nothing to parse for further links.  The
            # original fell through with `page` unbound and died on a
            # NameError that the outer except silently swallowed.
            safeprint("secceed")
            return [], url

        try:
            page = open(name).read()
            safeprint("secceed")
        except IOError:
            # wget failed, or saved under a different file name.
            return [], None

        safeprint("%s     downloaded" % url)
        # Page links look like  href="....html" , gallery images like
        # file="....jpg" .
        urls = re.findall(" href=\"(.*\.html)\" ", page)
        pattern = ' file="([^"]+\.jpg)"'
        urls1 = re.findall(pattern, page)
        safeprint("urlsssssssssss", urls)
        return urls + urls1, url
    except Exception as e:
        safeprint(e, "from downloader_naive", url)
        return [], None


class parseLinks(HTMLParser.HTMLParser):
    """HTML parser that accumulates the href target of every <a> tag
    fed to it, in document order, in ``self.urls``."""

    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        self.urls = []

    def handle_starttag(self, tag, attrs):
        # attrs arrives as a list of (name, value) pairs.
        if tag != "a":
            return
        self.urls.extend(value for name, value in attrs if name == "href")
if __name__ == '__main__':
    print "begin"
    #base_url = "http://se.633wyt.com/feizhuliu/"
    # url0 = "http://se.633wyt.com/List_feizhuliu/3387407_50_1.html"
    # url = "http://se.633wyt.com/List_feizhuliu/3387407_67_1.html"
    # url1 = "http://se.633wyt.com/List_feizhuliu/3387407_71_1.html"
    # url2 = "http://se.633wyt.com/List_feizhuliu/3387407_72_1.html"
    # url3 = "http://se.633wyt.com/List_feizhuliu/3387407_74_1.html"
    url = "http://www.9aaa.org/index.php"
    __URLS_NOT_PUSHED.append(([url],None))
    for i in range(__MAX_SIZE):
        thread.start_new_thread(worker, (i, dowloader_naive))
        #thread.start_new_thread(balanser,())
    balanser()

        #thread.wait()
#def worker

# class site:
#     """  """
#     def __init__(self, base_url, catch_depth = 3):
#         self.base_url = base_url
#         self.catch_depth = catch_depth
#         self.tasks = {}
#         self.workers = None
#     def _get_all_urls(url):
#         """get all urls in the page identified by url"""
#         url_parser = parseLinks()

# lParser = parseLinks()
# url = "http://se.633wyt.com/List_feizhuliu/3387407_50_1.html"
# url = "http://localhost/"
# request = urllib2.Request(url)
# #request.add_header('Range', 'bytes=%d-%d' %self.headerrange)
# #request.set_proxy('127.0.0.1:1234', "sock")
# conn = urllib2.urlopen(request)

# s = conn.read()
# #print s

# lParser.feed(s)

# lParser.close()


