from bs4 import BeautifulSoup
import datetime
from django.template import defaultfilters
from pykka.actor import ThreadingActor
from pykka.registry import  ActorRegistry
import urllib2
from urlparse import urljoin
from mongoutils.mongoutils import MongoHelper
from bs4.element import Comment

import logging
# Log everything at DEBUG and above to a local file next to the script.
logging.basicConfig(filename='crawler.log',level=logging.DEBUG)

# Words skipped when tagging job ads: English/Spanish stop words plus
# Computrabajo page boilerplate (inline JS/AdSense tokens, CSS fragments,
# dates and menu labels that leak into the extracted text).
ignorewords=set(['the','of','to','and','a','in','is','it','computrabajo','argentina','empleos','para','los','dias','domingos','text', 'decoration','none', 'hover','color', '000000', 'text', 'decoration', 'underline', '_uacct', 'ua', '2335793', '3', 'urchintracker', 'google_ad_client', 'pub', '5674950954982301', 'google_ad_width', '728', 'google_ad_height', '90', 'google_ad_format', '728x90_as', 'google_ad_type', 'text_image', 'google_ad_channel', '9906857828', '6398031593', '0882312369', 'google_color_border','003399', 'substring', '1', '7', 'google_color_bg','ffffff', 'google_color_link', '003399', 'substring', '1', '7', 'google_color_url', '003399', 'substring', 'google_color_text', 'google_ui_features', 'rc', '6', 'nbsp', 'nbsp', '20', 'de', 'enero', '2013', 'bolsa',  'trabajo', 'buscar', 'lista', 'empresas', 'preguntas', 'frecuentes','otros','pa','ses', 'candidatos','ingresar', 'curr', 'culum', 'modificar', 'eliminar', 'env', 'o', 'ofertas','su', 'buz', 'n', 'suscripci', 'n', 'registre', 'su', 'entrar', 'al', 'sistema', 'panel', 'control','120','600','120x600_as', 'google_ad_type', '2007', '02', '10'])
# Crawler version tag (not persisted anywhere in this file).
version = '0.1'
# Shared Mongo connection helper (project-local wrapper).
mongo=MongoHelper()

class ManagerZone():
    """Matches free text against the known Argentine provinces (zones)
    and Greater Buenos Aires / CABA sub-zones."""

    # (ISO 3166-2:AR code, province name) pairs.
    _PROVINCES = [
        ('AR-B', 'Buenos Aires'), ('AR-C', 'Capital Federal'),
        ('AR-K', 'Catamarca'), ('AR-H', 'Chaco'), ('AR-U', 'Chubut'),
        ('AR-X', 'Cordoba'), ('AR-W', 'Corrientes'), ('AR-E', 'Entre Rios'),
        ('AR-P', 'Formosa'), ('AR-Y', 'Jujuy'), ('AR-L', 'La Pampa'),
        ('AR-F', 'La Rioja'), ('AR-M', 'Mendoza'), ('AR-N', 'Misiones'),
        ('AR-Q', 'Neuquen'), ('AR-R', 'Rio Negro'), ('AR-A', 'Salta'),
        ('AR-J', 'San Juan'), ('AR-D', 'San Luis'), ('AR-Z', 'Santa Cruz'),
        ('AR-S', 'Santa Fe'), ('AR-G', 'Santiago del Estero'),
        ('AR-V', 'Tierra del Fuego'), ('AR-T', 'Tucuman'),
    ]

    # (sub-zone description, parent zone code) pairs.
    _SUB_ZONES = [
        ('zona norte', 'AR-B'), ('zona sur', 'AR-B'), ('zona oeste', 'AR-B'),
        ('gba norte', 'AR-B'), ('gba sur', 'AR-B'), ('gba oeste', 'AR-B'),
        ('capital federal', 'AR-C'), ('caba', 'AR-C'),
        ('ciudad de buenos aires', 'AR-C'),
        ('Ciudad Autonoma de Buenos Aires', 'AR-C'),
    ]

    def __init__(self):
        self.zones = [Zone(code=code, description=desc)
                      for code, desc in self._PROVINCES]
        self.subZones = [SubZone(description=desc, zone=zone)
                         for desc, zone in self._SUB_ZONES]
        logging.info('ManagerZone init')

    def matchSubZone(self, text):
        """Return the sub-zones whose description occurs in *text*
        (case-insensitive substring match)."""
        haystack = text.upper()
        return [sz for sz in self.subZones if sz.description.upper() in haystack]

    def matchZone(self, text):
        """Return the zones whose description occurs in *text*
        (case-insensitive substring match)."""
        haystack = text.upper()
        return [z for z in self.zones if z.description.upper() in haystack]
    
class Zone():
    """An Argentine province identified by its ISO 3166-2:AR code."""

    def __init__(self, code, description):
        self.code = code
        self.description = description

    def slug(self):
        """URL-friendly form of the province code."""
        return defaultfilters.slugify(self.code)

    def __tojson__(self):
        """Mongo document for this zone, keyed by its code."""
        return {
            "_id": self.code,
            "description": self.description,
            "desc": defaultfilters.slugify(self.code),
        }

    def __str__(self):
        return str(self.__tojson__())

class SubZone():
    """A named area inside a province (e.g. 'gba norte' in AR-B)."""

    def __init__(self, description, zone):
        self.description = description
        self.zone = zone

    def slug(self):
        """URL-friendly form of the sub-zone description."""
        return defaultfilters.slugify(self.description)

    def __tojson__(self):
        """Mongo document for this sub-zone with its parent zone code."""
        return {
            "zone": self.zone,
            "description": self.description,
            "desc": defaultfilters.slugify(self.description),
        }

    def __str__(self):
        return str(self.__tojson__())
        
class Provider():
    """Base class for a job-board crawler.

    Subclasses populate ``self.categories`` (and usually set ``self.name``)
    in their ``__init__``, then call :meth:`begin` to crawl every category.
    """

    def __init__(self, clientmongo):
        # Filled in by subclasses (see Crawler).
        self.categories = list()
        # Project-local Mongo wrapper used to persist crawled ads.
        self.clientMongo = clientmongo

    def begin(self, regexListPage, regexJobAd):
        """Crawl every registered category URL.

        ``regexListPage`` / ``regexJobAd`` are the substrings that identify
        listing pages and job-ad pages respectively (used by CrawlerActor).
        """
        logging.info('Provider begin')
        # One (actor, page) pair per start URL.  The original code had every
        # actor crawl the *entire* page list, duplicating all work once per
        # actor; each actor now crawls only its own page.
        jobs = list()
        for category in self.categories:
            logging.info('category: ' + category.__str__())
            for url in category.getUrls():
                pageCrawled = PageCrawled(url, regexListPage, regexJobAd,
                                          category, '', self.clientMongo)
                jobs.append((CrawlerActor.start().proxy(), pageCrawled))

        for actor, pageCrawled in jobs:
            # .get() blocks until this actor finishes its crawl.
            actor.crawl(set([pageCrawled]), 5).get()
            logging.info("The following category %s was crawled OK."
                         % pageCrawled.getCategory())
        # Provider itself never defines self.name (only subclasses do);
        # fall back to the class name so this log line cannot raise.
        logging.info("%s was crawled OK." % getattr(self, 'name', type(self).__name__))
        ActorRegistry.stop_all()
        logging.info('ActorRegistry stop ')
        
class Crawler(Provider):
    """Computrabajo Argentina provider: one Category per board section.

    Section index N maps to listing URL .../bt-ofr-SC00N-1.htm.
    """

    _URL_TEMPLATE = 'http://www.computrabajo.com.ar/bt-ofr-SC%03d-1.htm'

    # Ordered so that the list index matches the SCnnn code in the URL.
    _SECTIONS = [
        'Administracion/Oficina',
        'Arte/Diseno/Medios',
        'Cientifico/Investigacion',
        'Informatica/Telecom',
        'Direccion/Gerencia',
        'Economia/Contabilidad',
        'Educacion/Universidad',
        'Hosteleria/Turismo',
        'Ingenieria/Tecnico',
        'Legal/Asesoria',
        'Medicina/Salud',
        'Recursos Humanos',
        'Otros',
    ]

    def __init__(self):
        Provider.__init__(self, MongoHelper())
        self.name = 'COMPUTRABAJO'
        for index, section in enumerate(self._SECTIONS):
            url = self._URL_TEMPLATE % index
            self.categories.append(Category(section, [url], self.name))
    

# Module-level singleton used by PageCrawled.__tojson__ for zone matching.
managerZone= ManagerZone()

class PageCrawled():
    """A crawled page plus the metadata that gets persisted to Mongo."""

    def __init__(self, url, urlListPattern, urlNewsPattern, category, title, clientMongo):
        self.url = url
        # Substring identifying listing pages (queued for further crawling).
        self.urlListPattern = urlListPattern
        # Substring identifying job-ad pages (indexed immediately).
        self.urlNewsPattern = urlNewsPattern
        self.category = category
        self.clientMongo = clientMongo
        self.title = title
        # Robustness fix: text/tags were previously set only via addText /
        # addTags, so calling __tojson__/persist before them raised
        # AttributeError.  Start with safe empty defaults.
        self.text = ''
        self.tags = list()

    def addText(self, text):
        """Attach the extracted visible text of the page."""
        self.text = text

    def setTitle(self, title):
        """Replace the page title (the ad's own title, once fetched)."""
        self.title = title

    def getCategory(self):
        return self.category

    def getUrl(self):
        return self.url

    def addTags(self, tags):
        """Attach the keyword tags extracted from the page text."""
        self.tags = tags

    def persist(self):
        """Write this page's JSON document to Mongo."""
        self.clientMongo.addPageCrawler(self.__tojson__())

    def __tojson__(self):
        """Mongo document for this page, keyed by URL.

        Zones/sub-zones are detected by substring-matching the page text
        against the module-level managerZone singleton.
        """
        currentDateTime = unicode(datetime.datetime.now().strftime("%A %B %d, %Y"))
        zones = [zone.__tojson__() for zone in managerZone.matchZone(self.text)]
        subZones = [subZone.__tojson__() for subZone in managerZone.matchSubZone(self.text)]
        return {"_id": self.url,
                "title": self.title,
                "url": self.url,
                "tags": self.tags,
                "category": self.category.__str__(),
                "cat": defaultfilters.slugify(self.category.__str__()),
                "provider": self.category.provider,
                "creation_date": currentDateTime,
                "date": defaultfilters.slugify(currentDateTime),
                "zones": zones,
                "szones": subZones,
                "content": self.text}
    
class Category():
    """A named job category, its listing URLs and the provider label."""

    def __init__(self, name, urls, provider):
        self.provider = provider
        self.urls = urls
        self.name = name

    def getUrls(self):
        """Return the listing-page URLs for this category."""
        return self.urls

    def __str__(self):
        return self.name

    def __tojson__(self, url):
        """Mongo document for this category, keyed by *url*."""
        return dict(name=self.name, _id=url)

class DatePage():
    """A (name, url) pair persisted with today's date."""

    def __init__(self, name, url):
        self.url = url
        self.name = name

    def __tojson__(self):
        """Mongo document: today's ISO date, keyed by the page URL."""
        today = datetime.date.today()
        return {"date": unicode(today), "_id": self.url}
 
class CrawlerActor(ThreadingActor):
    """Pykka actor that breadth-first crawls listing pages, follows links
    that look like job ads, extracts their text/tags and persists them to
    Mongo via the page's Mongo client."""

    def __init__(self):
        super(CrawlerActor, self).__init__()

    def crawl(self,pages,depth=2):
        """Crawl *pages* for up to *depth* breadth-first levels.

        Links containing page.urlListPattern are queued for the next level;
        links containing page.urlNewsPattern are fetched and indexed at once.
        """
        for i in range(depth):
            # URLs already seen in this level sweep; prevents re-visiting.
            ignoredUrls = list()
            newpages=set()
            for page in pages:
                try:
                    c=urllib2.urlopen(page.url)
                except:
                    # NOTE(review): bare except also swallows KeyboardInterrupt.
                    logging.info("Could not open %s" % page.url)
                    continue
                soup=BeautifulSoup(c.read( ))
                links=soup('a')
                for link in links:
                    if ('href' in dict(link.attrs)):
                        url=urljoin(page.url,link['href'])
                        if url.find("'")!=-1: continue
                        url=url.split('#')[0] # remove location portion
                        if url[0:4]=='http' and not url in ignoredUrls:
                            ignoredUrls.append(url)
                            if url.find(page.urlListPattern)!=-1:
                                # Another listing page: queue it for the next depth pass.
                                newpages.add(PageCrawled(url,
                                                         page.urlListPattern,
                                                         page.urlNewsPattern,
                                                         page.category,
                                                         soup.title.string,
                                                         page.clientMongo))
                            if url.find(page.urlNewsPattern)!=-1:
                                # Looks like a job ad: fetch and index it now.
                                try:
                                    c=urllib2.urlopen(url)
                                except:
                                    logging.warning("Could not open %s" % url)
                                    continue
                                logging.info(  "Job ad from next URL %s" % url)
                                # The title passed here is the listing page's;
                                # addToIndex overwrites it with the ad's own title.
                                self.addToIndex(PageCrawled(url,
                                                            page.urlListPattern,
                                                            page.urlNewsPattern,
                                                            page.category,
                                                            soup.title.string,
                                                            page.clientMongo),
                                                            c.read( ))
            pages=newpages
            # NOTE(review): `page` is the last page of the finished level, not
            # a summary of the level; logged once per depth pass.
            logging.info( "The following %s was crawled." % page.url)

    def addToIndex(self,page,html):
        """Extract text and tags from *html*, attach them to *page*, set the
        ad's real title, and persist the page to Mongo."""
        soup = BeautifulSoup(html)
        text=self.getTextOnly(soup)
        words=self.separateWords(text)
        # Keep every token that is not in the module-level ignore list.
        tags = list()
        for i in range(len(words)):
            word=words[i]
            if word in ignorewords: continue
            tags.append(word)
        page.addTags(tags)
        page.setTitle(soup.title.string)
        page.addText(text)
        page.persist()

        logging.info("Job Ad was added on MONGO. " +  unicode(datetime.datetime.now()))

    def getTextOnly(self,soup):
        """Return the visible text between the trailing '//-->' of the inline
        scripts and the 'Correo-E:' marker (Computrabajo's 2013 page layout),
        with HTML comments removed and whitespace collapsed."""
        comments = soup.findAll(text=lambda text:isinstance(text, Comment))
        [comment.extract() for comment in comments]
        page = ''.join(soup.findAll(text=True))
        page = ' '.join(page.split())
        beginIndex = page.find("//-->")
        endIndex = page.find("Correo-E:")
        content = page[beginIndex+5:endIndex]
        beginIndex = content.find("//-->")
        # NOTE(review): endIndex is an offset into `page`, not `content`, so
        # reusing it after re-slicing looks like a bug — likely intended to be
        # content[beginIndex+5:].  Left as-is to preserve scraped output.
        return content[beginIndex+5:endIndex]

    # Tokenize on runs of non-word characters and lowercase each token.
    def separateWords(self,text):
        import re
        splitter=re.compile('\\W*')
        return [s.lower( ) for s in splitter.split(text) if s!='']
    # Return true if this url is already indexed
      

def stripHtmlTags(htmlTxt):
    """Return *htmlTxt* with all HTML markup removed.

    None passes through unchanged so callers can chain on optional input.
    """
    if htmlTxt is None:
        return None
    text_nodes = BeautifulSoup(htmlTxt).findAll(text=True)
    return ''.join(text_nodes)

    
        