# -*- encoding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf-8")

import re
from sqlite3 import dbapi2 as sqlite
from urlparse import urljoin

import requests
from BeautifulSoup import *

import nn

class crawler:
    """Breadth-first web crawler that stores a full-text index in SQLite.

    Tables used: urllist, wordlist, wordlocation, link, linkwords and
    pagerank (see createindextables / calculatepagerank).
    """

    # Initialise the crawler with the name of its database.
    def __init__(self, dbname):
        self.con = sqlite.connect(dbname)

    def __del__(self):
        self.con.close()

    def dbcommit(self):
        self.con.commit()

    # Helper: return the rowid of `value` in `table`.`field`, inserting a
    # new row when it does not exist yet.  `createnew` is kept for
    # interface compatibility (no caller passes False).
    def getentryid(self, table, field, value, createnew=True):
        # table/field are trusted identifiers supplied by this class; the
        # value comes from crawled pages, so it is bound as a parameter —
        # quotes in crawled data can no longer break (or inject into) the SQL.
        cur = self.con.execute(
            "select rowid from %s where %s = ?" % (table, field), (value,))
        res = cur.fetchone()
        if res is None:
            cur = self.con.execute(
                "insert into %s (%s) values (?)" % (table, field), (value,))
            return cur.lastrowid
        else:
            return res[0]

    # Index a single page: record every word and its position for this URL.
    def addtoindex(self, url, soup):
        if self.isindexed(url):
            return
        print('Indexing %s' % url)

        # Extract the words from the page text.
        text = self.gettextonly(soup)
        words = self.separatewords(text)

        # Get (or create) the id of this URL.
        urlid = self.getentryid('urllist', 'url', url)

        # Associate each word occurrence (with its position) with the URL.
        for i, word in enumerate(words):
            # if word in ignorewords: continue
            wordid = self.getentryid('wordlist', 'word', word)
            self.con.execute(
                "insert into wordlocation(urlid,wordid,location) values (?,?,?)",
                (urlid, wordid, i))

    # Extract the text of an HTML page (without tags) by walking the
    # parse tree recursively.
    def gettextonly(self, soup):
        v = soup.string
        if v is None:
            resulttext = ''
            for t in soup.contents:
                resulttext += self.gettextonly(t) + '\n'
            return resulttext
        else:
            return v.strip()

    # Split text into lowercased words on runs of non-alphanumeric
    # characters.  NOTE: the original pattern '\W*' also matched the empty
    # string; newer versions of re.split treat such zero-length matches as
    # split points between every character.  '\W+' is the intended
    # behaviour on every version.
    def separatewords(self, text):
        splitter = re.compile(r'\W+')
        return [s.lower() for s in splitter.split(text) if s != '']

    # Return True when the URL has already been indexed, i.e. it is in
    # urllist AND has at least one word location recorded.
    def isindexed(self, url):
        u = self.con.execute(
            "select rowid from urllist where url = ?", (url,)).fetchone()
        if u is not None:
            # Check whether it has actually been crawled.
            v = self.con.execute(
                'select * from wordlocation where urlid=?', (u[0],)).fetchone()
            if v is not None:
                return True
        return False

    # Record a link between two pages together with its anchor-text words.
    def addlinkref(self, urlFrom, urlTo, linkText):
        words = self.separatewords(linkText)
        fromid = self.getentryid('urllist', 'url', urlFrom)
        toid = self.getentryid('urllist', 'url', urlTo)
        if fromid == toid:
            return
        cur = self.con.execute(
            'insert into link(fromid,toid) values(?,?)', (fromid, toid))
        linkid = cur.lastrowid
        for word in words:
            # if word in ignorewords: continue
            wordid = self.getentryid('wordlist', 'word', word)
            self.con.execute(
                'insert into linkwords(wordid,linkid) values(?,?)',
                (wordid, linkid))

    # Breadth-first crawl starting from `pages`, down to the given depth,
    # indexing every page along the way.
    def crawl(self, pages, depth=2):
        for i in range(depth):
            newpages = set()
            for page in pages:
                try:
                    c = requests.get(page)
                except Exception:
                    # Any fetch failure (DNS, timeout, bad scheme, ...)
                    # just skips this page; was a bare `except:` before,
                    # which also swallowed KeyboardInterrupt.
                    print('Could not open %s ' % page)
                    continue
                # c.text is the whole document, so <head> content is also
                # indexed, which skews the content-based ranking methods.
                soup = BeautifulSoup(c.text)
                self.addtoindex(page, soup)

                for link in soup('a'):
                    if 'href' in dict(link.attrs):
                        url = urljoin(page, link['href'])
                        if url.find("'") != -1:
                            continue
                        url = url.split('#')[0]  # drop the fragment part
                        if url[0:4] == 'http' and not self.isindexed(url):
                            newpages.add(url)
                        # The words inside the <a> tag (anchor text).
                        linkText = self.gettextonly(link)
                        self.addlinkref(page, url, linkText)

                self.dbcommit()
            pages = newpages

    # Compute a PageRank value for every page:
    #   pr(a) = 0.15 + 0.85 * sum(pr(b)/outlinks(b)) over pages b linking to a
    def calculatepagerank(self, iterations=20):
        # Reset the PageRank table.  BUG FIX: the original dropped a
        # misspelled 'pagarank' table, so the following create failed on
        # every re-run.
        self.con.execute('drop table if exists pagerank')
        self.con.execute('create table pagerank (urlid primary key,score)')

        # Initialise every URL with a default PageRank of 1.
        self.con.execute('insert into pagerank select rowid,1.0 from urllist')
        self.dbcommit()

        for i in range(iterations):
            print('Iteration %d ' % (i))
            for (urlid,) in self.con.execute('select rowid from urllist'):
                pr = 0.15

                # Loop over every page linking to the current one.
                for (linker,) in self.con.execute(
                        'select distinct fromid from link where toid=?',
                        (urlid,)):
                    # PageRank of the linking page.
                    linkingpr = self.con.execute(
                        'select score from pagerank where urlid=?',
                        (linker,)).fetchone()[0]

                    # Total number of outgoing links on the linking page.
                    linkingcount = self.con.execute(
                        'select count(*) from link where fromid=?',
                        (linker,)).fetchone()[0]
                    pr += 0.85 * (linkingpr / linkingcount)
                self.con.execute(
                    'update pagerank set score=? where urlid=?', (pr, urlid))
            self.dbcommit()

    # Create the index tables and their indices.
    def createindextables(self):
        self.con.execute('create table urllist(url)')
        self.con.execute('create table wordlist(word)')
        self.con.execute('create table wordlocation(urlid,wordid,location)')
        self.con.execute('create table link(fromid integer,toid integer)')
        self.con.execute('create table linkwords(wordid,linkid)')
        self.con.execute('create index wordidx on wordlist(word)')
        self.con.execute('create index urlidx on urllist(url)')
        self.con.execute('create index wordurlidx on wordlocation(wordid)')
        self.con.execute('create index urltoidx on link(toid)')
        self.con.execute('create index urlfromidx on link(fromid)')



# 搜索引擎部分，用于搜索
# Search-engine part: runs queries against the index.
class searcher:
    """Query engine over the index built by crawler.

    Ranks results by combining one or more scoring methods (word
    frequency, document location, word distance, inbound links, PageRank,
    link text, neural network).
    """

    def __init__(self, dbname):
        self.con = sqlite.connect(dbname)
        # Neural network (nn.searchnet) used by nnscore.
        self.mynet = nn.searchnet('nn.db')

    def __del__(self):
        self.con.close()

    def dbcommit(self):
        self.con.commit()

    # Find every URL containing all the words of query `q`.
    # Returns (rows, wordids): rows is a list of tuples
    # (urlid, location_of_word0, location_of_word1, ...); wordids is the
    # list of matched word ids.
    def getmatchrows(self, q):
        # Pieces of the dynamically assembled query.
        fieldlist = 'w0.urlid'
        tablelist = ''
        clauselist = ''
        wordids = []

        # Split the query on spaces.
        words = q.split(' ')
        tablenumber = 0

        for word in words:
            # Look the word up; the value is bound as a parameter so
            # quotes in user input cannot break (or inject into) the SQL.
            wordrow = self.con.execute(
                "select rowid from wordlist where word=?", (word,)).fetchone()
            if wordrow is not None:
                wordid = wordrow[0]
                wordids.append(wordid)
                if tablenumber > 0:
                    tablelist += ','
                    clauselist += ' and '
                    clauselist += 'w%d.urlid=w%d.urlid and ' % (
                        tablenumber - 1, tablenumber)
                fieldlist += ',w%d.location' % tablenumber
                tablelist += 'wordlocation w%d' % tablenumber
                clauselist += 'w%d.wordid=%d' % (tablenumber, wordid)
                tablenumber += 1

        # BUG FIX: with no known word the dynamic SQL below was malformed
        # (empty from/where clause) and raised an OperationalError.
        if not wordids:
            return [], []

        # Assemble and run the full query.
        fullquery = 'select %s from %s where %s ' % (
            fieldlist, tablelist, clauselist)
        cur = self.con.execute(fullquery)
        rows = [row for row in cur]

        return rows, wordids

    # Score the matching rows (the heart of the ranking).
    def getscoredlist(self, rows, wordids):
        # row[0] holds the urlid.
        totalscores = dict([(row[0], 0) for row in rows])

        # Scoring methods are plugged in here; they can be combined and
        # weighted as needed, e.g.:
        # word frequency:
        # weights=[(1.0,self.frequencyscore(rows))]
        # document location:
        # weights=[(0.5,self.locationscore(rows))]
        # word distance:
        # weights=[(1.0,self.distancescore(rows))]
        # weighted combination:
        # weights=[(1.5,self.frequencyscore(rows)),(1.0,self.locationscore(rows)),(0.5,self.distancescore(rows))]
        # PageRank:
        # weights=[(1.0,self.pagerankscore(rows))]
        # link text:
        # weights=[(1.0,self.pagerankscore(rows)),(0.5,self.linktextscore(rows,wordids))]
        # Currently: neural-network scoring (backpropagation).
        weights = [(1.0, self.nnscore(rows, wordids))]

        for (weight, scores) in weights:
            for url in totalscores:
                totalscores[url] += weight * scores[url]

        return totalscores

    # Resolve a urlid back to its URL.
    def geturlname(self, id):
        return self.con.execute(
            "select url from urllist where rowid=?", (id,)).fetchone()[0]

    # Run a query, print the ten best-scoring URLs and return
    # (wordids, list of the top ten urlids).
    def query(self, q):
        rows, wordids = self.getmatchrows(q)
        scores = self.getscoredlist(rows, wordids)
        rankedscores = sorted(
            [(score, url) for (url, score) in scores.items()], reverse=True)
        for (score, urlid) in rankedscores[0:10]:
            print('%f\t%s' % (score, self.geturlname(urlid)))
        return wordids, [r[1] for r in rankedscores[0:10]]

    # Normalise the scores of one method into the 0..1 range so that
    # different methods can be combined.
    def normalizescores(self, scores, smallIsBetter=0):
        vsmall = 0.00001  # avoid division by zero
        # Guard: min()/max() below would raise on an empty dict.
        if not scores:
            return {}
        if smallIsBetter:
            # The lowest score becomes 1.0; the rest scale relative to it.
            minscore = min(scores.values())
            return dict([(u, float(minscore) / max(vsmall, l))
                         for (u, l) in scores.items()])
        else:
            # The highest score becomes 1.0; the rest scale relative to it.
            maxscore = max(scores.values())
            if maxscore == 0:
                maxscore = vsmall
            return dict([(u, float(c) / maxscore)
                         for (u, c) in scores.items()])

    # Content-based methods: frequency, location, distance.
    # Word frequency: score a page by how often the query words occur in it.
    def frequencyscore(self, rows):
        counts = dict([(row[0], 0) for row in rows])
        for row in rows:
            counts[row[0]] += 1
        return self.normalizescores(counts)

    # Document location: pages relevant to a word tend to mention it near
    # the top, so a lower combined position scores better.
    def locationscore(self, rows):
        locations = dict([(row[0], 1000000) for row in rows])
        for row in rows:
            loc = sum(row[1:])
            if loc < locations[row[0]]:
                locations[row[0]] = loc
        return self.normalizescores(locations, smallIsBetter=1)

    # Word distance: for multi-word queries, pages where the words sit
    # close together are usually more relevant.  Like locationscore, but
    # summing the gaps between consecutive word positions.
    def distancescore(self, rows):
        # Guard: rows[0] below would raise IndexError on an empty list.
        if not rows:
            return {}
        # With a single word every page scores the same.
        if len(rows[0]) <= 2:
            return dict([(row[0], 1.0) for row in rows])

        # Initialise with a very large distance.
        mindistance = dict([(row[0], 1000000) for row in rows])

        for row in rows:
            dist = sum([abs(row[i] - row[i - 1]) for i in range(2, len(row))])
            if dist < mindistance[row[0]]:
                mindistance[row[0]] = dist
        return self.normalizescores(mindistance, smallIsBetter=1)

    # Inbound-link methods.
    # Simple count: score each page by the number of links pointing at it.
    def inboundlinkscore(self, rows):
        uniqueurls = set([row[0] for row in rows])
        inboundcount = dict([(u, self.con.execute(
            'select count(*) from link where toid=?', (u,)).fetchone()[0])
            for u in uniqueurls])
        return self.normalizescores(inboundcount)

    # PageRank: pr(a) = 0.15 + 0.85 * sum(pr(b)/outlinks(b)).
    def pagerankscore(self, rows):
        pageranks = dict([(row[0], self.con.execute(
            'select score from pagerank where urlid=?',
            (row[0],)).fetchone()[0]) for row in rows])
        # Guard: max() below would raise on an empty dict.
        if not pageranks:
            return {}
        maxrank = max(pageranks.values())
        normalizedscores = dict([(u, float(l) / maxrank)
                                 for (u, l) in pageranks.items()])
        return normalizedscores

    # Link text: judge a page by the anchor text of links pointing at it.
    # A page linked from important pages whose anchor text matches the
    # query gets a high score; result pages with no such links score 0.
    def linktextscore(self, rows, wordids):
        linkscores = dict([(row[0], 0) for row in rows])
        for wordid in wordids:
            # Find the (fromid, toid) pairs of links whose anchor text
            # contains this word.
            cur = self.con.execute(
                'select link.fromid,link.toid from linkwords,link '
                'where linkwords.linkid=link.rowid and wordid=?', (wordid,))
            for (fromid, toid) in cur:
                # When the link target is in the result set, add the
                # source page's PageRank to the target's score.
                if toid in linkscores:
                    pr = self.con.execute(
                        'select score from pagerank where urlid=?',
                        (fromid,)).fetchone()[0]
                    linkscores[toid] += pr
        vsmall = 0.0001
        maxscore = max(linkscores.values()) if linkscores else 0
        if maxscore == 0:
            maxscore = vsmall
        normalizedscores = dict([(u, float(l) / maxscore)
                                 for (u, l) in linkscores.items()])
        return normalizedscores

    # Neural-network scoring (trained via backpropagation in nn).
    def nnscore(self, rows, wordids):
        urlids = [urlid for urlid in set([row[0] for row in rows])]
        # Create the hidden-layer node for this query if it is missing.
        self.mynet.generatehiddennode(wordids, urlids)
        nnres = self.mynet.getresult(wordids, urlids)
        scores = dict([(urlids[i], nnres[i]) for i in range(len(urlids))])
        return self.normalizescores(scores)

# Example usage — building the index (uncomment; requires network access):
# c = crawler('searchindex.db')
# c.createindextables()
# pagelist = ['http://kiwitobes.com/']
# c.crawl(pagelist)
# c.calculatepagerank()

# Run a demo query only when executed as a script, not on import.
# Requires an existing searchindex.db and nn.db.
if __name__ == '__main__':
    e = searcher('searchindex.db')
    print(e.query('right now'))