#!/usr/bin/env python3

import eventlet.green.urllib.request as request
import eventlet.green.urllib.parse as parse
import eventlet.queue as queue
import eventlet

import re

class Node:
    """A single crawled page: its URL, crawl depth, raw HTML, and out-links."""

    def __init__(self, url, depth=0):
        self.url = url        # absolute page URL
        self.depth = depth    # path depth at which the page was discovered
        self.html = ''        # page source, filled in after fetching
        self.links = []       # outgoing links extracted from the HTML

    def set_html(self, html):
        """Store the downloaded page source."""
        self.html = html

    def set_links(self, links):
        """Append extracted outgoing links to this node's link list."""
        self.links.extend(links)


class Crawler:
    """Breadth-limited single-site crawler built on eventlet green threads.

    Starting from ``url``, downloads pages, follows same-host http(s)
    links whose path depth does not exceed ``max_dep``, and hands every
    fetched page to a worker that prints the .jpg image URLs it contains.
    Workers exit when their queue stays empty for ``timeout`` seconds.
    """

    def __init__(self, url, max_dep=1, timeout=10, charset='utf-8'):
        self.root = url
        self.charset = charset            # charset used to decode response bodies
        self.max_dep = max_dep            # maximum path depth ('/' count) to follow
        self.timeout = timeout            # queue-get timeout; idles the workers out
        self.fetch_queue = queue.Queue()  # Nodes awaiting download
        self.parse_queue = queue.Queue()  # downloaded Nodes awaiting link extraction
        self.work_queue = queue.Queue()   # downloaded Nodes awaiting work()
        self.semaphore = eventlet.semaphore.Semaphore()  # guards self.finished
        self.link_re = re.compile(r'href=\"(http.*?)\"')
        # URLs already queued (trailing '/' stripped); a set gives O(1)
        # dedup — the original list made each isfetched() check O(n).
        self.finished = set()
        self.netloc = parse.urlparse(self.root).netloc  # crawl this host only
        self.add_url([self.root])

    def build_opener(self, headers=None, proxy=None, cookies=None):
        """Create the urllib opener, optionally with headers and a proxy.

        ``cookies`` is accepted for interface compatibility but is not
        yet implemented.
        """
        self.opener = request.build_opener()

        if headers is not None:
            self.opener.addheaders = headers
        if proxy is not None:
            # Fixed: the original referenced the misspelled ``self.openner``
            # here, raising AttributeError whenever a proxy was supplied.
            self.opener.add_handler(request.ProxyHandler(proxy))
        if cookies is not None:
            pass  # TODO: cookie support not implemented

    def fetch(self):
        """Green-thread worker: download queued Nodes until the queue idles out."""
        while True:
            try:
                node = self.fetch_queue.get(timeout=self.timeout)
            except queue.Empty:
                break  # no new URLs for `timeout` seconds -> crawl finished

            try:
                # Context manager closes the response (the original leaked it).
                with self.opener.open(node.url) as resp:
                    html = resp.read().decode(self.charset, 'ignore')
            except Exception as e:
                print(e)
                continue
            finally:
                # Balance every get() with task_done(), even on failure
                # (the original skipped it when the download raised).
                self.fetch_queue.task_done()

            node.set_html(html)
            self.parse_queue.put(node)
            self.work_queue.put(node)

    def parse(self):
        """Green-thread worker: extract links from fetched pages and enqueue them."""
        while True:
            try:
                node = self.parse_queue.get(timeout=self.timeout)
            except queue.Empty:
                break

            links = self.link_re.findall(node.html)
            node.set_links(links)
            self.parse_queue.task_done()

            self.add_url(links)

    def isfetched(self, url):
        """Return True if ``url`` (ignoring a trailing '/') was already queued."""
        with self.semaphore:
            return url.rstrip('/') in self.finished

    def add_url(self, urls):
        """Filter candidate URLs and enqueue the new, same-host ones.

        A URL is enqueued only if its scheme is http/https (the original
        rejected https), its host matches the root URL's host, its path
        depth is within ``max_dep``, and it has not been seen before.
        """
        for url in urls:
            if self.isfetched(url):
                continue

            result = parse.urlparse(url)
            if result.scheme not in ('http', 'https'):
                continue
            if result.netloc != self.netloc:
                continue

            depth = result.path.rstrip('/').count('/')
            if depth > self.max_dep:
                continue

            with self.semaphore:
                self.finished.add(url.rstrip('/'))

            self.fetch_queue.put(Node(url, depth))

    def work(self):
        """Green-thread worker: print each page URL and its .jpg image links."""
        img_re = re.compile(r'img.*?src=\"(.*?\.jpg)\"')

        while True:
            try:
                node = self.work_queue.get(timeout=self.timeout)
            except queue.Empty:
                break

            print(node.url)
            print(img_re.findall(node.html))
            self.work_queue.task_done()

    def run(self):
        """Spawn three fetchers plus one parser and one worker; block until done."""
        pool = eventlet.GreenPool()
        for _ in range(3):
            pool.spawn_n(self.fetch)
        pool.spawn_n(self.parse)
        pool.spawn_n(self.work)

        pool.waitall()


if __name__ == '__main__':
    # Crawl the target site one path level deep with a 10-second idle timeout.
    spider = Crawler('http://www.mzitu.com/', max_dep=1, timeout=10)
    spider.build_opener()
    spider.run()
