import urllib2
import re
from BeautifulSoup import BeautifulSoup 
from urlparse import urljoin

from pysqlite2 import dbapi2 as sqlite

import sn
import dbu

# Common English words that carry no search signal; skipped both when
# indexing page text (addtoindex) and when indexing link text (addlinkref).
stopwords = set(['the', 'of', 'to', 'and', 'a', 'an', 'in', 'is', 'it'])

class searchindexdb(dbu.db):
  """Thin specialization of dbu.db that knows how to read pagerank rows."""

  def __init__(self, dbname):
    """Open (or create) the index database called dbname."""
    dbu.db.__init__(self, dbname)

  def querypagerank(self, urlid):
    """Return the stored PageRank score of the page whose rowid is urlid."""
    row = self.queryone('pagerank', 'score', 'urlid=%d' % urlid)
    return row[0]

class crawler:
  """Breadth-first web crawler that builds the search index.

  Pages are fetched with urllib2 and parsed with BeautifulSoup 3; word
  occurrences (wordlocation), inter-page links (link/linkwords) and
  PageRank are stored in the SQLite database wrapped by searchindexdb.
  NOTE(review): Python 2 only (urllib2 and `except X, e` syntax).
  """

  def __init__(self, dbname):
    # Index database, plus the neural net stored in 'nn.db' (used by the
    # searcher side of this module).
    self.db = searchindexdb(dbname)
    self.nn = sn.searchnet('nn.db')

  def __del__(self):
    # Intentionally empty; presumably dbu.db / sn.searchnet manage their
    # own connection lifetimes -- TODO confirm.
    pass

  def getentryid(self, table, field, value, create_new = True):
    """Return the rowid of the row where field == value, inserting the
    value (and returning the new rowid) when it is absent.

    NOTE(review): create_new is accepted but never consulted -- a missing
    row is always inserted.  value is interpolated into SQL unescaped, so
    a double quote in the data would break the statement.
    """
    res = self.db.queryone(table, 'rowid', '%s = "%s"' % (field, value))
    if res == None:
      return self.db.insert(table, field, '"%s"' % value).lastrowid
    else:
      return res[0]

  def addtoindex(self, url, soup):
    """Index one parsed page: record every non-stopword together with its
    position in wordlocation, registering the url and each word in
    urllist/wordlist as needed.  No-op if the page is already indexed."""
    if self.isindexed(url): return

    print ('Indexing: ' + url)

    text = self.gettextonly(soup)
    words = self.separatewords(text)

    urlid = self.getentryid('urllist', 'url', url)
    for i in range(len(words)):
      word = words[i]
      if word in stopwords: continue
      wordid = self.getentryid('wordlist', 'word', word)
      self.db.insert('wordlocation', 'urlid, wordid, location',
                     '%d, %d, %d' % (urlid, wordid, i))

  def gettextonly(self, soup):
    """Return the text of a BeautifulSoup node: its .string when set,
    otherwise the newline-joined text of its children, recursively."""
    v = soup.string
    if v == None:
      c = soup.contents
      resulttext = ''
      for t in c:
        subtext = self.gettextonly(t)
        resulttext += subtext + '\n'
      return resulttext
    return v.strip()

  def separatewords(self, text):
    """Split text on runs of non-word characters and lowercase the
    fragments; empty fragments are dropped."""
    splitter = re.compile('\\W*')
    return [s.lower() for s in splitter.split(text) if s != '']

  # an url is indexed only if it exists in both urllist and wordlocation
  def isindexed(self, url):
    u = self.db.queryone('urllist', 'rowid', 'url = "%s"' % url)
    if u == None: return False

    # urllist also holds URLs seen only as link targets, so additionally
    # require at least one wordlocation row for the page.
    v = self.db.queryone('wordlocation', '*', 'urlid = %d' % u[0])
    if v == None: return False

    print('Hit:      ' + url)
    return True

  def addlinkref(self, urlfrom, urlto, linktext):
    """Record one urlfrom -> urlto link plus the non-stopwords of its
    anchor text in linkwords.  Self-links (fromid == toid) are skipped."""
    words = self.separatewords(linktext)
    fromid = self.getentryid('urllist', 'url', urlfrom)
    toid = self.getentryid('urllist', 'url', urlto)
    if fromid == toid: return
    cur = self.db.insert('link', 'fromid, toid', '%d, %d' % (fromid, toid))
    linkid = cur.lastrowid
    for word in words:
      if word in stopwords: continue
      wordid = self.getentryid('wordlist', 'word', word)
      self.db.insert('linkwords', 'linkid, wordid', '%d, %d' % (linkid, wordid))

  def printpages(self, pages):
    """Debug helper: print each page URL between separator lines."""
    print('-------------------------------------')
    for page in pages:
      print(page)
    print('-------------------------------------')


  def retrievelinks(self, page):
    """Fetch page, index it, and return its <a> tags.

    Returns an empty list on network errors, non-HTML content types, or
    read failures; each failure case is logged to stdout.
    """
    try:
      req = urllib2.Request(page, headers={ 'User-Agent' : 'Magic Browser' })
      c = urllib2.urlopen(req, timeout=2)
    except urllib2.URLError, e:
      print('---Could not open %s, Error: %s' % (page, e.reason))
      return list()
    except IOError, e:
      print('---Could not open ' + page)
      print(e)
      return list()

    # Only index HTML documents; skip images, PDFs, etc.
    content_type = c.info()['Content-Type']
    if content_type.find('text/html') == -1: 
      print("+++None 'text/html' page: " + page)
      print(c.info())
      return list()

    try:
      soup = BeautifulSoup(c.read())
    except IOError, e:
      print('---Could not read ' + page)
      print(e)
      return list()

    self.addtoindex(page, soup)
    return soup('a')

  def forkurl(self, page, link):
    """Resolve link's href against page and strip any #fragment.

    Returns '' when the tag has no href, or when the resolved URL
    contains a single quote (which would break the SQL strings built
    elsewhere in this module).
    """
    if 'href' not in dict(link.attrs): return ''

    url = urljoin(page, link['href'])
    if url.find("'") != -1:
      print('***Except:%s' % url)
      return ''
    return url.split('#')[0]

  def crawlpages(self, pages):
    """Crawl one frontier of pages; return the set of newly discovered,
    not-yet-indexed URLs (those starting with 'http') for the next
    depth level.  Commits the DB once per source page."""
    newpages = set()
    for page in pages:
      links = self.retrievelinks(page)
      for link in links:
        url = self.forkurl(page, link)
        if url.startswith('http'):
          if self.isindexed(url): continue

          newpages.add(url)

          # there can be imcomplete linkref, as url may has not been
          # indexed yet
          linktext = self.gettextonly(link)
          self.addlinkref(page, url, linktext)
      self.db.commit()
    return newpages

  def crawl(self, pages, depth = 2):
    """Breadth-first crawl: starting from pages, follow links for
    depth levels; each level's output feeds the next."""
    for i in range(depth):
      print('depth %d' % i)
      #self.printpages(pages)
      pages = self.crawlpages(pages)

  def createindextables(self):
    """Create the index schema and its lookup indices."""
    self.db.createtables(['urllist(url)', 
                          'wordlist(word)', 
                          'wordlocation(urlid, wordid, location)', 
                          'linkwords(wordid, linkid)', 
                          'link(fromid integer, toid integer)']) 
    self.db.createindices(['wordidx on wordlist(word)', 
                           'urlidx on urllist(url)', 
                           'wordurlidx on wordlocation(wordid)',
                           'urltoidx on link(toid)', 
                           'urlfromidx on link(fromid)'])
  
  def calculatepagerank(self, iterations = 20):
    """Rebuild the pagerank table from scratch.

    Standard PageRank with damping factor 0.85: every page starts at
    1.0 and is iteratively updated to
    0.15 + 0.85 * sum(pr(linker) / outdegree(linker))
    over the distinct pages linking to it.
    """
    self.db.droptable('pagerank')
    self.db.createtable('pagerank(urlid primary key, score)')
    # Every known URL starts with score 1.0.
    self.db.insertfromtable('pagerank', 'rowid, 1.0', 'urllist')
    self.db.commit()

    # urls is only used for its count, to scale the progress report.
    urls = [urlid for (urlid,) in self.db.queryall('urllist', 'rowid')]
    n_urls = len(urls) * 1.0

    for i in range(iterations):
      print("Iterations %d" % i)
      counter = 0
      for (urlid,) in self.db.queryall('urllist', 'rowid'):
        pr = 0.15
        for (linker,) in self.db.query('link', 'fromid', 'toid=%d' % urlid, 'distinct'):
          linkingpr = self.db.querypagerank(linker)
          linkingcount = self.db.queryone('link', 'count(*)', 'fromid=%d' % linker)[0]
          pr += 0.85 * (linkingpr / linkingcount)

        counter += 1
        # Coarse progress report roughly every 10% of the URLs.
        if (counter >= 0.1 * n_urls):
          print('10 percents complete')
          counter = 0
        self.db.update('pagerank', 'score=%f' % pr, 'urlid=%d' % urlid)
        self.db.commit()

class searcher:
  """Ranks pages from the index built by crawler for a text query.

  Each scoring metric maps urlid -> score normalized into [0, 1]
  (1.0 = best); getscoredlist() combines them with fixed unit weights.
  """

  def __init__(self, dbname):
    self.db = searchindexdb(dbname)
    # Bug fix: neuralnetscore() reads self.nn, but it was never set here
    # (crawler.__init__ creates the same net), so any call to
    # neuralnetscore() raised AttributeError.
    self.nn = sn.searchnet('nn.db')

  def __del__(self):
    # Intentionally empty; presumably dbu.db / sn.searchnet close their
    # own connections -- TODO confirm.
    pass

  def getmatchrows(self, q):
    """Find pages containing every word of the space-separated query q.

    Returns (rows, wordids): rows are tuples
    (urlid, loc_word0, loc_word1, ...) -- one per combination of word
    locations on a page -- and wordids are the rowids of the query words
    present in the index.  Words not in the index are silently dropped.
    """
    fieldlist = 'w0.urlid'
    tablelist = ''
    clauselist = ''
    wordids = []

    tablenumber = 0
    for word in q.split(' '):
      # NOTE(review): word is interpolated into SQL unescaped (as in the
      # rest of this module); a quote in the query breaks the statement.
      wordrow = self.db.queryone('wordlist', 'rowid', 'word="%s"' % word)
      if wordrow == None: continue

      wordid = wordrow[0]
      wordids.append(wordid)
      if tablenumber > 0:
        # Join this word's wordlocation alias to the previous one on urlid.
        tablelist += ', '
        clauselist += ' and '
        clauselist += ('w%d.urlid=w%d.urlid and ' 
                       % (tablenumber-1, tablenumber))
      fieldlist += ', w%d.location' % tablenumber
      tablelist += 'wordlocation w%d' % tablenumber
      clauselist += 'w%d.wordid=%d' % (tablenumber, wordid)
      tablenumber += 1

    # Bug fix: with no indexed query word the original built the
    # malformed statement "select w0.urlid from  where " and crashed.
    if tablenumber == 0:
      return [], []

    fullquery = ('select %s from %s where %s' 
                 % (fieldlist, tablelist, clauselist))
    print(fullquery)

    cur = self.db.query(tablelist, fieldlist, clauselist)
    rows = [row for row in cur]

    return rows, wordids
  
  def getscoredlist(self, rows, wordids):
    """Combine the individual metrics into a urlid -> total score dict."""
    totalscores = dict([(row[0], 0) for row in rows])

    # (weight, urlid -> normalized score) pairs; all weights equal here.
    weights = [(1.0, self.frequencyscore(rows)), 
               (1.0, self.locationscore(rows)), 
               (1.0, self.pagerankscore(rows)),
               (1.0, self.distancescore(rows)),
               (1.0, self.linktextscore(rows, wordids))]

    for (weight, scores) in weights:
      for url in totalscores:
        totalscores[url] += weight * scores[url]

    return totalscores

  def normalizescores(self, scores, small_isbetter = False):
    """Rescale a urlid -> score dict into [0, 1] with 1.0 the best.

    When small_isbetter, the minimum raw score maps to 1.0; otherwise
    the maximum does.
    """
    vsmall = 0.00001  # guards against division by zero
    # Bug fix: min()/max() of an empty dict raised ValueError whenever a
    # query matched nothing; an empty input now yields an empty result.
    if not scores: return {}
    if small_isbetter:
      minscore = min(scores.values())
      return dict([(url, float(minscore)/max(vsmall, score)) 
                   for (url, score) in scores.items()])
    else:
      maxscore = max(scores.values())
      if maxscore == 0: maxscore = vsmall
      return dict([(url, float(score)/maxscore) 
                  for (url, score) in scores.items()])
  
  def frequencyscore(self, rows):
    """Score pages by how many location combinations they produced
    (more occurrences of the query words = better)."""
    counts = dict([(row[0], 0) for row in rows])
    for row in rows: counts[row[0]] += 1
    return self.normalizescores(counts)

  def locationscore(self, rows):
    """Score pages by how close to the top the query words appear.

    A query may match a page in several places; the smallest summed
    location over all combinations is used (smaller = better).
    """
    locations = dict([(row[0], 1000000) for row in rows])
    for row in rows:
      location = sum(row[1:])
      if location < locations[row[0]]: locations[row[0]] = location
    return self.normalizescores(locations, small_isbetter = True)

  def distancescore(self, rows):
    """Score pages by how close together the query words appear
    (smaller total gap between consecutive word locations = better)."""
    # Bug fix: rows[0] raised IndexError when no page matched the query.
    if not rows: return {}
    # A single-word query (urlid + one location column) has no distances.
    if len(rows[0]) <= 2: return dict([(row[0], 1.0) for row in rows])

    mindistance = dict([(row[0], 1000000) for row in rows])
    for row in rows:
      dist = sum([abs(row[i] - row[i-1]) for i in range(2, len(row))])
      if dist < mindistance[row[0]]: mindistance[row[0]] = dist
    
    return self.normalizescores(mindistance, small_isbetter = True)

  def inboundlinkscore(self, rows):
    """Score pages by their raw count of inbound links."""
    uniqueurls = set([row[0] for row in rows])
    inboundcount = dict(
        [(u, self.db.queryone('link', 'count(*)', 'toid=%d' % u)[0]) for u in uniqueurls])
    return self.normalizescores(inboundcount)

  def pagerankscore(self, rows):
    """Score pages by their precomputed PageRank (see calculatepagerank)."""
    pageranks = dict([(row[0], self.db.querypagerank(row[0])) for row in rows])
    
    return self.normalizescores(pageranks)

  def linktextscore(self, rows, wordids):
    """Score pages by the PageRank of pages that link to them using a
    query word in the anchor text."""
    linkscores = dict([(row[0], 0) for row in rows])
    for wordid in wordids:
      cur = self.db.query('linkwords, link', 'link.fromid, link.toid', 'wordid=%d and linkwords.linkid=link.rowid' % wordid)
      # each (fromid, toid) is a link whose anchor text contains wordid
      for (fromid, toid) in cur:
        if toid in linkscores:
          # toid matched the query; credit it with the linker's PageRank
          pr = self.db.querypagerank(fromid)
          linkscores[toid] += pr
    return self.normalizescores(linkscores)

  def neuralnetscore(self, rows, wordids):
    """Score pages with the trained neural network (self.nn)."""
    urlids = [urlid for urlid in set([row[0] for row in rows])]
    res = self.nn.getresult(wordids, urlids)
    scores = dict([(urlids[i], res[i]) for i in range(len(urlids))])
    return self.normalizescores(scores)

  def geturlname(self, id):
    """Return the URL string stored for the urllist rowid id."""
    return self.db.queryone('urllist', 'url', 'rowid=%d' % id)[0]

  def query(self, q):
    """Run query string q: print the top 10 (score, url) results and
    return (wordids, top 10 urlids)."""
    rows, wordids = self.getmatchrows(q)
    scores = self.getscoredlist(rows, wordids)
    rankedscores = sorted([(score, url) for (url, score) in scores.items()],
                          reverse = True)
    for (score, urlid) in rankedscores[0:10]:
      print('%f\t%s' % (score, self.geturlname(urlid)))
    return wordids, [r[1] for r in rankedscores[0:10]]
