#!/usr/bin/env python

import urllib2
from urlparse import urlparse, urljoin
import HTMLParser
from redis import Redis
from url_normalize import url_normalize
from multiprocessing import Queue, Pool, Process, Manager, Value
import time
import sys
import copy_reg
import types
import signal, os
import math

def _pickle_method(method):
    """Reduce a bound method to (unpickler, state) for copy_reg.

    Captures the method's name, its bound instance and its class so
    _unpickle_method can rebuild the bound method in another process.
    """
    name = method.im_func.__name__
    instance = method.im_self
    klass = method.im_class
    return _unpickle_method, (name, instance, klass)

def _unpickle_method(func_name, obj, cls):
    for cls in cls.mro():
        try:
            func = cls.__dict__[func_name]
        except KeyError:
            pass
        else:
            break
    return func.__get__(obj, cls)

# Register a pickler for bound methods so multiprocessing can transfer
# them between processes (plain pickle cannot handle MethodType in py2).
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)


class HttpDownloader(object):
    class LinksParser(HTMLParser.HTMLParser):
        def __init__(self,pageHTML):
            HTMLParser.HTMLParser.__init__(self)
            self.links = []
            try:
                self.feed(pageHTML)
            except: pass

        def handle_starttag(self, tag, attrs):
            if tag == 'a':
                for name,val in attrs:
                    if name == 'href':
                        self.links.append(val)

        def handle_endtag(self, tag): pass

    def __init__(self, timeout=180):
         self.timeout=timeout

    def _getHttpPage(self, url):
        req = urllib2.Request(url)
        req.add_header('User-agent', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)')
        try:
            http_response = urllib2.urlopen(req)
        except: return ""
        return http_response.read()

    def _getLinks(self, page):
        linksParser = HttpDownloader.LinksParser(page)
        res = []
        for site in linksParser.links:
            if self._filter(site):
                res.append(site)
        return res

    def _filter(self, url):
        valid_extension = ["html", "htm", "php", "php3", "asp"]
        parts = urlparse(url).path.split(".")
        parts_len = len(parts)
        if len(parts) > parts_len:
            if not parts[parts_len] in valid_extension:
                return False
        return True

    def getLinksFromUrl(self, url):
        page = self._getHttpPage(url)
        links = self._getLinks(page)
        newlinks = []
        for link in links:
            linkinfo = urlparse(link)
            if linkinfo.scheme == "":
                link=urljoin(url, linkinfo.path)
            if linkinfo.scheme != "http":
                continue
            newlinks.append(link)
        return newlinks

class CrawlerWorker(object):
    """Callable dispatched to the process pool: crawls a single link.

    Returns a dict with the visited link and the new links found on it;
    the result is consumed by the apply_async callback (Crawler._toRedis).
    """

    def __init__(self, visited_queue, hd):
        # BUG FIX: the injected downloader was ignored and a fresh
        # HttpDownloader was always created; honor the argument, falling
        # back to a default instance only when none is supplied.
        self.hd = hd if hd is not None else HttpDownloader()
        self.visited_queue = visited_queue  # unused; kept for the queue pipeline

    def __call__(self, link):
        newlinks = self.hd.getLinksFromUrl(link)
        # NOTE: the key is intentionally spelled 'visisted' — the
        # consumers in this file read it with that exact spelling.
        return {'visisted': link, 'newlinks': newlinks}

class CrawlerDbToQueue(object):
    def __init__(self, redis, tovisit_key, tovisit_queue, tovisit_event, invisit_proxy):
        self.redis = redis
        self.tovisit_key = tovisit_key
        self.tovisit_queue = tovisit_queue
        self.tovisit_event = tovisit_event
        self.invisit_proxy = invisit_proxy

        self.max_fails= 10
        self.fails_in_row= 0
        self.sleep_secs= 20

    def __call__(self):
        loop = True
        while loop:
            link = self.redis.srandmember(self.tovisit_key)
            if link == None:
                self.tovisit_event.wait()
                continue
            if self.fails_in_row >= self.max_fails:
                print "dormo un po!"
                time.sleep(self.sleep_secs)
                print "sveglio!"
                self.fails_in_row=0
                continue
            if link in self.invisit_proxy:
                self.fails_in_row+=1
                continue
            self.fails_in_row=0
            print "aggiungo! %s" % (self.tovisit_queue.qsize()) 
            self.tovisit_queue.put(link)
            print "aggiunto! %s" % (self.tovisit_queue.qsize()) 
            self.invisit_proxy.append(link)
            self.tovisit_event.clear()

class CrawlerQueueToDb(object):
    """Drains crawl results from the visited queue back into Redis.

    Currently unused: Crawler._toRedis performs the same bookkeeping
    inline as the pool callback.
    """

    def __init__(self, redis, tovisit_key, tovisit_queue, tovisit_event, invisit_proxy, visited_key, visited_queue):
        self.redis = redis
        self.tovisit_key = tovisit_key
        self.tovisit_queue = tovisit_queue
        self.tovisit_event = tovisit_event
        self.invisit_proxy = invisit_proxy
        self.visited_key = visited_key
        self.visited_queue = visited_queue

    def __call__(self):
        """Run forever, persisting results as workers produce them."""
        while True:
            result = self.visited_queue.get()
            visited_link = result['visisted']
            found_links = result['newlinks']
            # Mark the page as done and drop it from the in-flight state.
            self.redis.sadd(self.visited_key, visited_link)
            self.invisit_proxy.remove(visited_link)
            self.redis.srem(self.tovisit_key, visited_link)
            # Schedule every outgoing link not already visited.
            for candidate in found_links:
                candidate = url_normalize(candidate)
                if self.redis.sismember(self.visited_key, candidate) == 1:
                    continue
                self.redis.sadd(self.tovisit_key, candidate)
            # Pulse the event: wake any waiter, then reset the flag.
            self.tovisit_event.set()
            self.tovisit_event.clear()

class Crawler(object):

    def __init__(self, maxprocess):
        self.maxprocess = maxprocess
        self.maxtaskperchild = 100
        #self.tovisit_queue_max_size = 2 * self.maxprocess
        self.tovisit_queue_max_size = self.maxprocess + math.floor(0.5*self.maxprocess)
        self.visited_queue_max_size = 100 * self.maxprocess
        self.tovisit_key = "tovisit_urls"
        self.visited_key = "visited_urls"

        self.manager = Manager()
        self.tovisit_queue = None #self.manager.Queue(self.tovisit_queue_max_size)
        self.visited_queue = None #self.manager.Queue(self.visited_queue_max_size)

        self.max_fails= 10
        self.fails_in_row= 0
        self.sleep_secs= 20


        self.tovisit_event = self.manager.Event()
        self.tovisit_event.clear()
        self.invisit_proxy = self.manager.list()

        self.pool = Pool(processes=self.maxprocess) #, maxtasksperchilds=self.maxtask)
        self.hd = HttpDownloader()
        self.redis = Redis()

        self.worker = CrawlerWorker(self.visited_queue, self.hd)
        #self.dbtoqueue = CrawlerDbToQueue(Redis(), self.tovisit_key, self.tovisit_queue, self.tovisit_event, self.invisit_proxy)
        #self.queuetodb = CrawlerQueueToDb(Redis(), self.tovisit_key, self.tovisit_queue, self.tovisit_event, self.invisit_proxy, self.visited_key, self.visited_queue)

    def _toRedis(self, data):
        link = data['visisted']
        newlinks = data['newlinks']
        #print "visited: %s" % (link)
        self.redis.sadd(self.visited_key, link)
        self.invisit_proxy.remove(link)
        self.redis.srem(self.tovisit_key, link)
        for newlink in newlinks:
            newlink=url_normalize(newlink)
            if self.redis.sismember(self.visited_key, newlink) == 1:
                continue
            self.redis.sadd(self.tovisit_key, newlink)
        self.tovisit_event.set()
        self.tovisit_event.clear()
        


    def run(self):
        #Process(target=self.dbtoqueue).start()
        #Process(target=self.queuetodb).start()

        loop = True
        while loop:
            if len(self.invisit_proxy) > self.tovisit_queue_max_size:
                self.tovisit_event.wait()
            link = self.redis.srandmember(self.tovisit_key)
            if link == None:
                self.tovisit_event.wait()
                continue
            if self.fails_in_row >= self.max_fails:
                print "dormo un po!"
                time.sleep(self.sleep_secs)
                print "sveglio!"
                self.fails_in_row=0
                continue
            if link in self.invisit_proxy:
                self.fails_in_row+=1
                continue
            self.fails_in_row=0
            self.invisit_proxy.append(link)
            self.pool.apply_async(func=self.worker, args=[link], callback=self._toRedis)
            self.tovisit_event.clear()

    def addLink(self, link):
        self.redis.sadd(self.tovisit_key, link)



if __name__ == "__main__":
    # Seed the frontier and crawl until interrupted.
    crawler = Crawler(1)
    crawler.addLink("http://www.google.com/search?q=fileserve")
    crawler.run()

