import urllib2
import re
from BeautifulSoup import BeautifulSoup 
from urlparse import urljoin

from pysqlite2 import dbapi2 as sqlite

# Common English words that carry no search value; skipped when indexing
# page text and link anchor text.
stopwords = {'the', 'of', 'to', 'and', 'a', 'an', 'in', 'is', 'it'}

class crawler:
  """Breadth-first web crawler that builds a full-text index in SQLite.

  The index consists of:
    urllist       -- one row per known URL
    wordlist      -- one row per known word
    wordlocation  -- (urlid, wordid, location) word positions per page
    link          -- (fromid, toid) edges between pages
    linkwords     -- words appearing in a link's anchor text

  All values that originate from crawled pages are bound as DB-API
  query parameters (never interpolated into SQL text) to avoid SQL
  injection and quoting bugs.
  """

  def __init__(self, dbname):
    # Open (or create) the SQLite database that stores the index.
    self.con = sqlite.connect(dbname)

  def __del__(self):
    self.con.close()

  def dbcommit(self):
    """Flush pending inserts to disk."""
    self.con.commit()

  def getentryid(self, table, field, value, create_new = True):
    """Return the rowid of `value` in `table`.`field`, inserting a new
    row when it is not present yet.

    `table` and `field` are trusted internal identifiers (they cannot be
    bound as parameters); `value` may come from crawled pages and is
    passed as a query parameter.
    """
    cur = self.con.execute(
        "select rowid from %s where %s = ?" % (table, field), (value,))
    res = cur.fetchone()
    if res is not None:
      return res[0]
    cur = self.con.execute(
        "insert into %s (%s) values (?)" % (table, field), (value,))
    return cur.lastrowid

  def addtoindex(self, url, soup):
    """Index the text of `soup` under `url`, recording the position of
    every non-stopword. No-op when `url` is already fully indexed."""
    if self.isindexed(url): return

    print ('Indexing: ' + url)

    text = self.gettextonly(soup)
    words = self.separatewords(text)

    urlid = self.getentryid('urllist', 'url', url)
    for i, word in enumerate(words):
      if word in stopwords: continue
      wordid = self.getentryid('wordlist', 'word', word)
      self.con.execute(
          'insert into wordlocation(urlid, wordid, location) '
          'values (?, ?, ?)', (urlid, wordid, i))

  def gettextonly(self, soup):
    """Recursively extract the visible text of a BeautifulSoup node,
    joining child nodes with newlines."""
    v = soup.string
    if v is None:
      resulttext = ''
      for t in soup.contents:
        resulttext += self.gettextonly(t) + '\n'
      return resulttext
    return v.strip()

  def separatewords(self, text):
    """Split `text` into lowercase words on runs of non-word characters."""
    # \W+ rather than \W*: a pattern that can match the empty string
    # makes re.split break between every character on Python >= 3.7.
    splitter = re.compile(r'\W+')
    return [s.lower() for s in splitter.split(text) if s != '']

  # an url is indexed only if it exists in both urllist and wordlocation
  def isindexed(self, url):
    """Return True when `url` is in urllist AND has at least one
    wordlocation row (i.e. addtoindex completed for it)."""
    res = self.con.execute(
        "select rowid from urllist where url = ?", (url,))
    u = res.fetchone()
    if u is None:
      return False
    res = self.con.execute(
        "select * from wordlocation where urlid = ?", (u[0],))
    if res.fetchone() is not None:
      print('Hit:      ' + url)
      return True
    return False

  def addlinkref(self, urlfrom, urlto, linktext):
    """Record a link edge from `urlfrom` to `urlto` plus the
    non-stopwords of its anchor text. Self-links are ignored."""
    words = self.separatewords(linktext)
    fromid = self.getentryid('urllist', 'url', urlfrom)
    toid = self.getentryid('urllist', 'url', urlto)
    if fromid == toid: return
    cur = self.con.execute(
        "insert into link(fromid, toid) values (?, ?)", (fromid, toid))
    linkid = cur.lastrowid
    for word in words:
      if word in stopwords: continue
      wordid = self.getentryid('wordlist', 'word', word)
      self.con.execute(
          "insert into linkwords(linkid, wordid) values (?, ?)",
          (linkid, wordid))

  def printpages(self, pages):
    """Debug helper: print each page URL between separator rules."""
    print('-------------------------------------')
    for page in pages:
      print(page)
    print('-------------------------------------')


  def retrievelinks(self, page):
    """Fetch `page`, index its text, and return its <a> tags.

    Returns an empty list on any network error, non-HTML content type,
    or read/parse failure (best-effort crawling: errors are logged and
    skipped, never raised).
    """
    try:
      req = urllib2.Request(page, headers={ 'User-Agent' : 'Magic Browser' })
      c = urllib2.urlopen(req, timeout=2)
    except urllib2.URLError as e:
      print('---Could not open %s, Error: %s' % (page, e.reason))
      return list()
    except IOError as e:
      print('---Could not open ' + page)
      print(e)
      return list()

    # Skip anything that is not HTML (images, PDFs, ...).
    content_type = c.info()['Content-Type']
    if content_type.find('text/html') == -1:
      print("+++None 'text/html' page: " + page)
      print(c.info())
      return list()

    try:
      soup = BeautifulSoup(c.read())
    except IOError as e:
      print('---Could not read ' + page)
      print(e)
      return list()

    self.addtoindex(page, soup)
    return soup('a')

  def forkurl(self, page, link):
    """Resolve an <a> tag into an absolute URL (fragment stripped).
    Returns '' when the tag has no href or the URL contains a quote."""
    if 'href' not in dict(link.attrs): return ''

    url = urljoin(page, link['href'])
    # Historical guard against quoting problems; queries are now
    # parameterized, but kept to preserve crawl behavior.
    if url.find("'") != -1:
      print('***Except:%s' % url)
      return ''
    return url.split('#')[0]

  def crawlpages(self, pages):
    """Crawl one frontier of `pages`; return the set of newly
    discovered, not-yet-indexed http(s) URLs."""
    newpages = set()
    for page in pages:
      links = self.retrievelinks(page)
      for link in links:
        url = self.forkurl(page, link)
        if url.startswith('http'):
          if self.isindexed(url): continue

          newpages.add(url)

          # there can be incomplete linkref, as url may not have been
          # indexed yet
          linktext = self.gettextonly(link)
          self.addlinkref(page, url, linktext)
      self.dbcommit()
    return newpages

  def crawl(self, pages, depth = 2):
    """Breadth-first crawl starting from `pages`, `depth` levels deep."""
    for i in range(depth):
      print('depth %d' % i)
      #self.printpages(pages)
      pages = self.crawlpages(pages)

  def createtable(self, schema):
    self.con.execute('create table ' + schema)

  def createindex(self, index):
    self.con.execute('create index ' + index)

  def createindextables(self):
    """Create all index tables and their lookup indices (run once on a
    fresh database)."""
    self.createtable('urllist(url)')
    self.createtable('wordlist(word)')
    self.createtable('wordlocation(urlid, wordid, location)')
    self.createtable('linkwords(wordid, linkid)')
    self.createtable('link(fromid integer, toid integer)')

    self.createindex('wordidx on wordlist(word)')
    self.createindex('urlidx on urllist(url)')
    self.createindex('wordurlidx on wordlocation(wordid)')
    self.createindex('urltoidx on link(toid)')
    self.createindex('urlfromidx on link(fromid)')

