# Standard library (Python 2)
import datetime as dt
import re
import urllib2
from urlparse import urljoin

# Third-party
from BeautifulSoup import *
from pymongo import Connection
from pymongo.objectid import ObjectId


ignorewords = set(['the', 'of', 'to', 'and', 'a', 'in', 'is', 'it'])


class crawler:
    """Breadth-first web crawler that indexes pages into MongoDB.

    Collections used (inside the database selected at construction):
      urllist      -- {url}                         one doc per known URL
      wordlist     -- {word}                        one doc per known word
      wordlocation -- {urlid, wordid, location}     word occurrences per page
      link         -- {fromid, toid}                page-to-page links
      linkwords    -- {linkid, wordid}              words in link anchor text
    """

    # Compiled once; \W+ splits on runs of non-word characters.  The
    # original \W* relied on re.split ignoring zero-length matches.
    _SPLITTER = re.compile(r'\W+')

    def __init__(self, dbname):
        """Open a connection to the local MongoDB server and select *dbname*.

        NOTE(review): pymongo.Connection is the pre-2.x API (modern pymongo
        uses MongoClient); kept to match the file's imports.
        """
        self.con = Connection()
        self.db = self.con[dbname]

    def __del__(self):
        # Best-effort cleanup of the MongoDB connection.
        self.con.close()

    def getentryid(self, collection, field, value, createnew=True):
        """Return the _id of the doc in *collection* where field == value.

        If no such doc exists: insert one and return its new _id when
        *createnew* is true, otherwise return None.  (The original
        accepted *createnew* but ignored it.)
        """
        doc = self.db[collection].find_one({field: value})
        if doc is not None:
            return doc['_id']
        if not createnew:
            return None
        return self.db[collection].insert({field: value})

    def addtoindex(self, url, soup):
        """Index *url*: record every non-ignored word with its position."""
        if self.isindexed(url):
            return

        print('Indexing %s' % url)

        text = self.gettextonly(soup)
        words = self.separatewords(text)
        urlid = self.getentryid('urllist', 'url', url)

        for location, word in enumerate(words):
            if word in ignorewords:
                continue
            wordid = self.getentryid('wordlist', 'word', word)
            self.db['wordlocation'].insert({
                'urlid': str(urlid),
                'wordid': str(wordid),
                'location': location,
            })

    def gettextonly(self, soup):
        """Recursively extract the visible text of a BeautifulSoup node.

        Leaf nodes contribute their stripped string; a non-leaf node
        yields each child's text followed by a newline.
        """
        text = soup.string
        if text is not None:
            return text.strip()
        return ''.join(self.gettextonly(child) + '\n'
                       for child in soup.contents)

    def separatewords(self, text):
        """Split *text* on non-word characters; return lower-cased tokens."""
        return [tok.lower() for tok in self._SPLITTER.split(text) if tok != '']

    def isindexed(self, url):
        """True when *url* is in urllist AND has at least one word indexed."""
        u = self.db['urllist'].find_one({'url': url})
        if u is None:
            return False
        # A urllist entry alone is not enough -- words must have been stored.
        return self.db['wordlocation'].find_one(
            {'urlid': str(u['_id'])}) is not None

    def addlinkref(self, urlFrom, urlTo, linkText):
        """Record a link between two pages plus the words of its anchor text."""
        words = self.separatewords(linkText)
        fromid = self.getentryid('urllist', 'url', urlFrom)
        toid = self.getentryid('urllist', 'url', urlTo)
        if fromid == toid:
            return  # self-link: nothing useful to record

        linkid = self.db['link'].insert(
            {'fromid': str(fromid), 'toid': str(toid)})

        for word in words:
            if word in ignorewords:
                continue
            wordid = self.getentryid('wordlist', 'word', word)
            self.db['linkwords'].insert(
                {'linkid': str(linkid), 'wordid': str(wordid)})

    def crawl(self, pages, depth=2):
        """Breadth-first crawl starting from *pages*, *depth* levels deep.

        Stores the total elapsed time in self.crawlingtime (a timedelta).
        """
        start = dt.datetime.now()

        for _ in range(depth):
            newpages = set()

            for page in pages:
                try:
                    c = urllib2.urlopen(page)
                except Exception:
                    # Narrowed from a bare except: that also swallowed
                    # KeyboardInterrupt/SystemExit.
                    print('Could not open %s' % page)
                    continue

                soup = BeautifulSoup(c.read())
                self.addtoindex(page, soup)

                for link in soup('a'):
                    if 'href' not in dict(link.attrs):
                        continue
                    url = urljoin(page, link['href'])
                    if url.find("'") != -1:
                        continue  # skip urls containing quotes
                    url = url.split('#')[0]  # drop the fragment
                    if url[0:4] == 'http' and not self.isindexed(url):
                        newpages.add(url)
                    linkText = self.gettextonly(link)
                    self.addlinkref(page, url, linkText)

            pages = newpages

        end = dt.datetime.now()
        self.crawlingtime = end - start
                        

class searchengine():
    """Query side: scores pages previously indexed by ``crawler``."""

    def __init__(self, dbname):
        """Open a connection to the local MongoDB server and select *dbname*."""
        self.con = Connection()
        self.db = self.con[dbname]

    def __del__(self):
        # Best-effort cleanup of the MongoDB connection.
        self.con.close()

    def getmatchrows(self, q):
        """Return (rows, wordids) for the space-separated query *q*.

        rows    -- wordlocation docs for urls containing EVERY found word
        wordids -- string _ids of the query words present in wordlist
        """
        wordids = []
        cursor = None

        for word in q.split(' '):
            worddoc = self.db.wordlist.find_one({'word': word})
            if worddoc is None:
                continue  # unknown word contributes nothing

            wid = str(worddoc['_id'])
            wordids.append(wid)

            if cursor is None:
                cursor = self.db.wordlocation.find({'wordid': wid})
            else:
                # Narrow to urls that also matched all previous words.
                urlids = [loc['urlid'] for loc in cursor]
                cursor = self.db.wordlocation.find(
                    {'wordid': wid, 'urlid': {'$in': urlids}})

        # Fix: the original iterated None (TypeError) when no query word
        # was present in the index.
        if cursor is None:
            return [], wordids
        return list(cursor), wordids

    def getscoredlist(self, rows, wordids):
        """Combine the weighted scoring functions into a urlid->score dict."""
        totalscores = dict([(row['urlid'], 0) for row in rows])

        # (weight, urlid->score) pairs; currently frequency only.
        weights = [(1.0, self.frequencyscore(rows))]

        for (weight, scores) in weights:
            for url in totalscores:
                totalscores[url] += weight * scores[url]

        return totalscores

    def geturlname(self, id):
        """Return the URL string for a urllist _id, or None if unknown.

        Fix: the original returned the whole document, so query()
        printed a raw dict instead of the page url.
        """
        doc = self.db['urllist'].find_one({'_id': ObjectId(id)})
        return doc['url'] if doc is not None else None

    def query(self, q):
        """Run query *q*; print the 10 best-scored urls and elapsed time."""
        start = dt.datetime.now()

        rows, wordids = self.getmatchrows(q)
        scores = self.getscoredlist(rows, wordids)

        rankedscores = sorted(
            [(score, url) for (url, score) in scores.items()], reverse=True)

        for (score, urlid) in rankedscores[0:10]:
            print('%f\t%s' % (score, self.geturlname(urlid)))

        end = dt.datetime.now()
        print('Query executed in %d sec' % (end - start).seconds)

    def normalizescore(self, scores, smallIsBetter=0):
        """Scale a urlid->score dict into [0, 1]; empty input yields {}.

        When *smallIsBetter* is truthy the smallest raw score maps to 1.0,
        otherwise the largest raw score maps to 1.0.
        """
        vsmall = 0.00001  # floor to avoid division by zero

        if not scores:
            return {}  # fix: min()/max() raised ValueError on empty input

        if smallIsBetter:
            minscore = min(scores.values())
            return dict([(u, float(minscore) / max(vsmall, l))
                         for (u, l) in scores.items()])

        # fix: an all-zero score dict divided by zero in the original
        maxscore = max(vsmall, max(scores.values()))
        return dict([(u, float(c) / maxscore) for (u, c) in scores.items()])

    def frequencyscore(self, rows):
        """Score urls by how often the query words occur (more is better)."""
        counts = dict([(row['urlid'], 0) for row in rows])

        for row in rows:
            counts[row['urlid']] += 1

        return self.normalizescore(counts)

    def locationscore(self, rows):
        """Score urls by the earliest query-word position (earlier is better).

        Fix: the original indexed rows as tuples (row[0], sum(row[1:])) --
        the SQL-era row layout -- but getmatchrows returns MongoDB docs
        keyed by 'urlid' and 'location', so it raised KeyError.
        """
        best = dict([(row['urlid'], 1000000) for row in rows])

        for row in rows:
            loc = row['location']
            if loc < best[row['urlid']]:
                best[row['urlid']] = loc

        return self.normalizescore(best, smallIsBetter=1)
