#!/usr/bin/env python

import urllib2
from urlparse import urlparse, urljoin
import HTMLParser
from redis import Redis
from url_normalize import url_normalize
from multiprocessing import Queue, Pool, Process, Manager, Value
import time
import sys
import copy_reg
import types
import signal, os
import math
import cProfile

def _pickle_method(method):
    """Reduce a bound method to a picklable (callable, args) pair.

    Registered with copy_reg below so multiprocessing can ship bound
    methods (e.g. pool callbacks) across process boundaries, which
    Python 2's pickle cannot do natively.
    """
    return _unpickle_method, (
        method.im_func.__name__,
        method.im_self,
        method.im_class,
    )

def _unpickle_method(func_name, obj, cls):
    for cls in cls.mro():
        try:
            func = cls.__dict__[func_name]
        except KeyError:
            pass
        else:
            break
    return func.__get__(obj, cls)

# Register the bound-method reducer so pickle (and therefore
# multiprocessing) can serialize instance methods under Python 2.
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)


class HttpDownloader(object):
    """Fetch HTTP pages and extract the <a href> links they contain."""

    class LinksParser(HTMLParser.HTMLParser):
        """Collect every href attribute of <a> tags into self.links."""

        def __init__(self, pageHTML):
            HTMLParser.HTMLParser.__init__(self)
            self.links = []
            try:
                self.feed(pageHTML)
            except Exception:
                # Best-effort parse: malformed markup just truncates the
                # link list instead of aborting the whole crawl.
                pass

        def handle_starttag(self, tag, attrs):
            if tag == 'a':
                for name, val in attrs:
                    if name == 'href':
                        self.links.append(val)

        def handle_endtag(self, tag):
            pass

    # File extensions considered crawlable HTML-ish pages.
    VALID_EXTENSIONS = ("html", "htm", "php", "php3", "asp")

    def __init__(self, timeout=180):
        # Socket timeout in seconds, applied to every request.
        self.timeout = timeout

    def _getHttpPage(self, url):
        """Return the body of *url*, or "" on any network/HTTP error."""
        req = urllib2.Request(url)
        req.add_header('User-agent', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)')
        try:
            # BUG FIX: self.timeout was accepted in __init__ but never
            # passed to urlopen, so a dead server could hang a worker
            # indefinitely.
            http_response = urllib2.urlopen(req, timeout=self.timeout)
            return http_response.read()
        except Exception:
            # Best-effort: unreachable or broken pages yield no links.
            return ""

    def _getLinks(self, page):
        """Extract hrefs from *page*, keeping only crawlable ones."""
        linksParser = HttpDownloader.LinksParser(page)
        return [site for site in linksParser.links if self._filter(site)]

    def _filter(self, url):
        """Reject URLs whose path carries a non-HTML file extension.

        Paths without any extension (directories, bare paths) pass.
        """
        # BUG FIX: the original compared len(parts) > len(parts), which
        # is always False — the filter accepted everything, and had the
        # branch ever run, parts[len(parts)] would raise IndexError.
        parts = urlparse(url).path.split(".")
        if len(parts) > 1 and parts[-1] not in self.VALID_EXTENSIONS:
            return False
        return True

    def getLinksFromUrl(self, url):
        """Download *url* and return its outgoing absolute http:// links."""
        page = self._getHttpPage(url)
        newlinks = []
        for link in self._getLinks(page):
            linkinfo = urlparse(link)
            if linkinfo.scheme == "":
                # Relative link: resolve it against the page URL.  Join
                # the full link (not just .path) so queries survive.
                link = urljoin(url, link)
                # BUG FIX: re-parse after joining; the original tested
                # the stale parse result, whose scheme was still "", so
                # every relative link was silently dropped.
                linkinfo = urlparse(link)
            if linkinfo.scheme != "http":
                continue
            newlinks.append(link)
        return newlinks

def init_worker():
    """Pool-worker initializer: ignore SIGINT in child processes so a
    Ctrl-C is handled only by the parent's handler."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)

class CrawlerWorker(object):
    """Picklable callable executed in pool workers: fetch one URL and
    report the links found on it."""

    def __init__(self, hd):
        # BUG FIX: the injected downloader was ignored and a fresh
        # default-configured HttpDownloader was built instead; honor the
        # dependency (fall back only when callers pass None).
        self.hd = hd if hd is not None else HttpDownloader()

    def __call__(self, link):
        newlinks = self.hd.getLinksFromUrl(link)
        # NOTE: 'visisted' (sic) is the exact key Crawler._toRedis reads;
        # the misspelling is kept for compatibility.
        return {'visisted': link, 'newlinks': newlinks}

class Crawler(object):
    """Web crawler: the frontier and visited sets live in Redis, page
    fetching is fanned out to a multiprocessing Pool, and results come
    back through an apply_async callback in the parent."""

    def __init__(self, maxprocess):
        self.maxprocess = maxprocess
        self.maxtaskperchild = 100
        # Soft cap on tasks in flight (currently informational only).
        self.inprocess_max_size = self.maxprocess + math.floor(0.5 * self.maxprocess)

        # NOTE(review): max_fails / fails_in_row are initialized and
        # reset, but no failure path ever increments them yet.
        self.max_fails = 10
        self.fails_in_row = 0
        # Polling interval used when the frontier is momentarily busy.
        self.sleep_secs = 2

        self.hd = HttpDownloader()
        self.redis = Redis()

        # Redis set keys: the crawl frontier and already-crawled URLs.
        self.tovisit_key = "tovisit_urls"
        self.visited_key = "visited_urls"

        self.manager = Manager()
        # Pulsed whenever a fetch completes / new links land in Redis.
        self.tovisit_event = self.manager.Event()
        self.tovisit_event.clear()
        # URLs currently being fetched by some worker.
        self.invisit_proxy = self.manager.list()

        self.pool = Pool(processes=self.maxprocess, initializer=init_worker)

        self.worker = CrawlerWorker(self.hd)

        self.loop = False

    def _toRedis(self, data):
        """apply_async callback (runs in the pool's result thread):
        record a finished fetch and enqueue its outgoing links."""
        link = data['visisted']  # (sic) key spelled by CrawlerWorker
        self.redis.sadd(self.visited_key, link)
        self.invisit_proxy.remove(link)
        self.redis.srem(self.tovisit_key, link)
        for newlink in data['newlinks']:
            self.addLink(newlink)
        # Pulse the event so a dispatcher blocked on an empty/busy
        # frontier re-checks.  (set+clear is racy — a waiter can miss a
        # pulse — so waiters below use a bounded timeout where a missed
        # pulse would otherwise deadlock.)
        self.tovisit_event.set()
        self.tovisit_event.clear()

    def _run(self):
        """Dispatch loop: pick random frontier URLs, hand them to the pool."""
        while self.loop:
            link = self.redis.srandmember(self.tovisit_key)
            if link is None:
                # Frontier empty: sleep until a callback adds links.
                self.tovisit_event.wait()
                continue
            if link in self.invisit_proxy:
                # BUG FIX: a link stays in the Redis frontier until its
                # callback removes it, so srandmember could hand the
                # same URL to two workers.  Wait (bounded, in case the
                # pulse is missed) for a fetch to finish, then retry.
                self.tovisit_event.wait(self.sleep_secs)
                continue
            self.fails_in_row = 0
            self.invisit_proxy.append(link)
            self.pool.apply_async(func=self.worker, args=[link], callback=self._toRedis)
            self.tovisit_event.clear()

    def start(self):
        """Enter the dispatch loop (blocks until stop() flips self.loop)."""
        print('parto')
        self.loop = True
        self._run()

    def stop(self):
        """Stop dispatching and wait for in-flight pool tasks to finish."""
        print('STOP')
        self.loop = False
        # Wake _run so it observes loop == False and exits.
        self.tovisit_event.set()
        print('UNLOCK AND FALSE')
        self.pool.close()
        print('CLOSE')
        self.pool.join()
        print('JOIN')

    def __call__(self):
        self.start()

    def addLink(self, link):
        """Queue *link* for crawling unless it was already visited."""
        try:
            link = url_normalize(link)
        except Exception:
            # Best-effort: keep the raw link if normalization chokes.
            pass
        if self.redis.sismember(self.visited_key, link) == 1:
            return
        self.redis.sadd(self.tovisit_key, link)

crawler = None  # assigned in the __main__ block below


def stop():
    """SIGINT handler body: shut the crawler down cleanly and exit."""
    print("CHIUDO!")
    crawler.stop()
    print("SLEEP")
    # Give the pool/manager helper processes a moment to wind down.
    time.sleep(5)
    print("EXIT")
    sys.exit()


if __name__ == "__main__":
    crawler = Crawler(4)
    # BUG FIX: the seed URL had a doubled scheme ('http://http://...'),
    # which is not a fetchable address.
    crawler.addLink('http://www.informatik.uni-trier.de/~ley/db/')
    # Route Ctrl-C in the parent through stop(); workers themselves
    # ignore SIGINT (see init_worker).
    signal.signal(signal.SIGINT, lambda sig, frame: stop())
    crawler.start()



