# available in the program package as: aranha.py
import networkx as NX
from BeautifulSoup import SoupStrainer, BeautifulSoup as BS
from BeautifulSoup import BeautifulStoneSoup as XS
import sys, os, urllib2, urllib, re
from sqlobject import  *

# Working directory holding the crawler's SQLite knowledge base.
laracnadir = os.path.expanduser('~/.laracna')
if not os.path.exists(laracnadir):
    os.mkdir(laracnadir)
# Point every SQLObject model in this module at the knowledge database.
sqlhub.processConnection = connectionForURI('sqlite://'+laracnadir+'/knowdb')

# Crawl state shared across classes in this module.
# NOTE: the original ``global nomeatual, langatual, urlatual`` statement was
# removed — a ``global`` declaration at module level is a no-op in Python.
nomeatual = ''   # name of the article currently being processed
langatual = ''   # Wikipedia language code in use (e.g. 'pt', 'en')
urlatual = ''    # URL currently being fetched
class Ideia(SQLObject):
    """Database row recording one crawled Wikipedia article ("idea")."""
    # article name (last path segment of its URL)
    nome = UnicodeCol()
    # number of '/wiki/...' links found on the page
    nlinks = IntCol()
    # pickled list of the hrefs found on the page
    links = PickleCol()
    # URL the page was fetched from
    ender = StringCol()
    
class Crawler:
    def __init__(self,starturl,depth):
        try:
            Ideia.createTable()
        except:
            pass
        self.SU = starturl
        self.depth = depth
        self.fila=[]
        self.depth = depth
        self.curdepth = 0
        self.started = 0
        self.nlinks = 0
        self.history = []
        self.G = NX.Graph()
    def parsePag(self,urlend):
        urlatual = urlend
        user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        values = {'name' : 'John  Smith',
          'location' : 'Northampton',
          'language' : 'Python' }
        headers = { 'User-Agent' : user_agent }
        data = urllib.urlencode(values)
        print "Abrindo ", urlend
        req = urllib2.Request(urlend,data,headers)
        fd = urllib2.urlopen(req)
        html = fd.read()
        return html
    
    def verResp(self,html):
        '''
        Verifica se resposta e um hit ou nao
        '''
        lnkart = SoupStrainer('a', href=re.compile('^/wiki/*'))
        artlist =  [tag['href'] for tag in BS(html, parseOnlyThese=lnkart)]
        if artlist[0].endswith('Disambig.svg'):
            self.fila.append('http://'+langatual+'.wikipedia.org'+artlist[3])
            self.curlinks = artlist
        else:
            self.curlinks = artlist
            Ideia(nome=nomeatual,nlinks = len(artlist), links = artlist,ender = urlatual)
            self.G.add_edges_from([(nomeatual,i) for i in self.curlinks])
            if self.curdepth > self.depth:
                return
            self.fila.extend(['http://'+langatual+'.wikipedia.org' + i for i in artlist])
            self.curdepth +=1
            
    def move(self):
        if not self.fila:
            if not self.started:
                self.fila.append(self.SU)
        while self.fila:
            self.started = 1
            urlatual = self.fila.pop(0)
            nomeatual = urlatual.split('/')[-1]
            if ":" in nomeatual: continue
            if nomeatual in ['Main_page']+self.history:continue
            print "buscando ", nomeatual,
            print "Faltam ", len(self.fila)
            try:
                html = self.parsePag(urlatual)
            except:
                continue
            self.verResp(html)
            self.nlinks +=1
            self.history.append(nomeatual)
            
        
class UrlFac:
    """Builds Wikipedia article URLs for a given language code."""
    def __init__(self, lang='en'):
        global langatual
        self.lang = lang    # e.g. 'en', 'pt'
        langatual = lang
    def urlifica(self, palavra):
        """Return the Wikipedia URL for article ``palavra``.

        Also publishes the name and URL in the module-level globals
        consumed by ``Crawler``.
        """
        # BUG FIX: without the global declaration both assignments below
        # were locals, so the module globals never changed.
        global nomeatual, urlatual
        nomeatual = palavra
        u = "http://"+self.lang+".wikipedia.org/wiki/"+palavra
        urlatual = u
        return u
   
if __name__=="__main__":
    UF = UrlFac('pt')
    u = UF.urlifica(sys.argv[1])
    Cr = Crawler(u,1)
    Cr.move()
