import urllib.request as request
import urllib.parse as parse
from bs4 import BeautifulSoup, element
from smysql import DB
import re,nn

class searcher(DB):
    """Scores and ranks indexed pages for a free-text query.

    Relies on the tables built by ``crawler`` (wordlist, wordlocation,
    link, linkwords, pagerank) and on a shared ``nn.searchnet`` network.
    """

    # Shared neural network used by nnscore; created once per process.
    mynet = nn.searchnet()

    def normalizescores(self, scores, smallIsBetter=0):
        """Scale the values of a {urlid: score} dict into [0, 1].

        When ``smallIsBetter`` is truthy, lower raw scores map to higher
        normalized scores; otherwise scores are divided by the maximum.
        Returns {} for an empty input (the original crashed on min()/max()
        of an empty sequence).
        """
        vsmall = 0.00001  # floor that avoids division by zero
        if not scores:
            return {}
        if smallIsBetter:
            minscore = max(min(scores.values()), vsmall)
            return {u: float(minscore) / max(vsmall, l)
                    for u, l in scores.items()}
        maxscore = max(scores.values())
        if maxscore == 0:
            maxscore = vsmall
        return {u: float(c) / maxscore for u, c in scores.items()}

    def frequencyscore(self, rows):
        """More occurrences of a urlid among the match rows -> higher score."""
        counts = {row[0]: 0 for row in rows}
        for row in rows:
            counts[row[0]] += 1
        return self.normalizescores(counts)

    def locationscore(self, rows):
        """Query words appearing nearer the top of a page score higher."""
        locations = {row[0]: 1000000 for row in rows}
        for row in rows:
            loc = sum(row[1:])
            if loc < locations[row[0]]:
                locations[row[0]] = loc
        return self.normalizescores(locations, smallIsBetter=1)

    def distancescore(self, rows):
        """Query words close together on the same page score higher."""
        if not rows:
            return {}
        # With a single query word (row is urlid + one location) distance
        # is meaningless; every page ties.
        if len(rows[0]) <= 2:
            return {row[0]: 1.0 for row in rows}
        mindistance = {row[0]: 1000000 for row in rows}
        for row in rows:
            dist = sum(abs(row[i] - row[i - 1]) for i in range(2, len(row)))
            if dist < mindistance[row[0]]:
                mindistance[row[0]] = dist
        return self.normalizescores(mindistance, smallIsBetter=1)

    def getmatchrows(self, q):
        """Build and run a self-join over wordlocation for every query word.

        Returns ``(rows, wordids)`` where each row is
        (urlid, loc_word0, loc_word1, ...) and wordids are the matched
        wordlist ids.
        """
        fieldlist = 'w0.urlid'
        tablelist = ''
        clauselist = ''
        rows = []
        wordids = []
        tablenumber = 0

        for word in q.split(' '):
            # q is user input interpolated into SQL below: double single
            # quotes so they cannot terminate the string literal (injection).
            word = word.replace("'", "''")
            if word.encode('utf8').isalpha():
                sql = "select id from wordlist where word='%s'" % word
            else:
                # Non-alphabetic words fall back to a substring match.
                sql = "select id from wordlist where word like '%%%s%%'" % word
            wordrow = self.execute(sql, True)
            if not wordrow:
                continue
            wordid = [row[0] for row in wordrow]
            wordids.extend(wordid)
            if tablenumber > 0:
                tablelist += ','
                clauselist += ' and '
                # Join each word's location table to the previous one on urlid.
                clauselist += 'w%d.urlid=w%d.urlid and ' % (tablenumber - 1, tablenumber)
            fieldlist += ',w%d.location' % tablenumber
            tablelist += 'wordlocation as w%d' % tablenumber
            if len(wordid) > 1:
                clauselist += ('w%d.wordid in ' % tablenumber) + str(tuple(wordid))
            else:
                clauselist += 'w%d.wordid=%d ' % (tablenumber, wordid[0])
            tablenumber += 1

        fullquery = 'select %s from %s where %s' % (fieldlist, tablelist, clauselist)
        # Log a truncated copy of the generated SQL for debugging.
        print(fullquery[:300])
        if tablenumber > 0:
            rows = list(self.execute(fullquery, True))
        return rows, wordids

    def getscoredlist(self, rows, wordids):
        """Combine the individual scoring functions with fixed weights."""
        totalscores = {row[0]: 0 for row in rows}

        weights = [
            (1.0, self.frequencyscore(rows)),
            (1.0, self.locationscore(rows)),
            (2, self.distancescore(rows)),
            (4, self.pagerankscore(rows)),
            (3, self.linktextscore(rows, wordids)),
            (3, self.nnscore(rows, wordids)),
        ]

        for weight, scores in weights:
            for url in totalscores:
                totalscores[url] += weight * scores[url]

        return totalscores

    def geturlname(self, id):
        """Look up the url string for a urllist id."""
        sql = "select url from urllist where id=%d" % id
        return self.execute(sql)[0]

    def query(self, q):
        """Print the top-10 results for q.

        Returns ``(wordids, top_urlids)``, or None when nothing matched.
        """
        rows, wordids = self.getmatchrows(q)
        if not rows:
            print('nothing match!')
            return
        scores = self.getscoredlist(rows, wordids)
        rankedscores = sorted(((score, url) for url, score in scores.items()),
                              reverse=True)
        for score, urlid in rankedscores[:10]:
            print('%f\t%s' % (score, self.geturlname(urlid)))
        return wordids, [r[1] for r in rankedscores[:10]]

    def inboundlinkscore(self, rows):
        """Score by the raw number of inbound links to each url."""
        uniqueurls = {row[0] for row in rows}
        sql = "select count(*) from link where toid="
        inboundcount = {u: self.execute(sql + str(u))[0] for u in uniqueurls}
        return self.normalizescores(inboundcount)

    def pagerankscore(self, rows):
        """Score by precomputed PageRank (see crawler.calculatepagerank)."""
        sql = 'select score from pagerank where urlid={0}'
        pagerank = {row[0]: self.execute(sql.format(row[0]))[0] for row in rows}
        return self.normalizescores(pagerank)

    def linktextscore(self, rows, wordids):
        """Score urls by the PageRank of pages that link to them with
        anchor text containing a query word."""
        linkscore = {row[0]: 0 for row in rows}
        for wordid in wordids:
            sql = ("select link.fromid,link.toid from linkwords,link where wordid=%d"
                   " and linkwords.linkid=link.id" % wordid)
            for fromid, toid in self.execute(sql, True):
                if toid in linkscore:
                    pr = self.execute(
                        "select score from pagerank where urlid=%d" % fromid)[0]
                    linkscore[toid] += pr
        return self.normalizescores(linkscore)

    def nnscore(self, rows, wordids):
        """Score candidate urls with the trained neural network."""
        urlids = list({row[0] for row in rows})
        nnres = searcher.mynet.getresult(wordids, urlids)
        scores = dict(zip(urlids, nnres))
        return self.normalizescores(scores)


class crawler(DB):
    """Crawls pages and builds the full-text index, link graph and
    PageRank tables that ``searcher`` queries."""

    # Very common words that are never indexed.
    ignorewords = ['html', 'the', 'of', 'to', 'and', 'a', 'in', 'is', 'it']

    def getentryid(self, table, field, value, createnew=True):
        """Return the id of ``value`` in ``table``.``field``.

        Inserts the value and returns the new id when absent and
        ``createnew`` is true (the original accepted but ignored this
        parameter); returns None when absent and ``createnew`` is false.
        ``value`` is page-derived text, so single quotes are doubled to
        keep the string-built SQL valid.
        """
        value = value.replace("'", "''")
        sql = "select id from %s WHERE %s='%s'" % (table, field, value)
        res = self.execute(sql)
        if res is not None:
            return res[0]
        if not createnew:
            return None
        sql = "INSERT INTO %s (%s) VALUE ('%s')" % (table, field, value)
        return self.execute(sql, last=True)

    def addtoindex(self, url, soup):
        """Extract the words of ``soup`` and record their locations for ``url``."""
        if self.isindexed(url):
            return
        print('Indexing %s' % url)
        text = self.gettextonly(soup)
        words = self.separatewords(text)
        print(words)
        urlid = self.getentryid('urllist', 'url', url)
        for location, word in enumerate(words):
            if word in crawler.ignorewords:
                continue
            wordid = self.getentryid('wordlist', 'word', word)
            sql = ("INSERT INTO wordlocation(urlid,wordid,location) VALUE (%d,%d,%d)"
                   % (urlid, wordid, location))
            self.execute(sql)

    def gettextonly(self, soup):
        """Recursively collect visible text, skipping style/script blocks."""
        v = soup.string
        if v is None:
            resulttext = ''
            for t in soup.contents:
                if t.name in ['style', 'script']:
                    continue
                resulttext += self.gettextonly(t) + '\n'
            return resulttext
        # Exact type check is deliberate: it drops NavigableString
        # subclasses such as Comment and CData.
        if type(v) is not element.NavigableString:
            return ''
        return v.strip()

    def separatewords(self, text):
        """Split on non-word characters, lowercase, and drop pure digits."""
        splitter = re.compile(r'\W+')
        return [s.lower() for s in splitter.split(text) if s and not s.isdigit()]

    def isindexed(self, url):
        """True if ``url`` is in urllist and has at least one word indexed."""
        sql = "select id from urllist WHERE url='%s'" % url.replace("'", "''")
        u = self.execute(sql)
        if u:
            sql = "select * from wordlocation WHERE urlid=%d" % u[0]
            if self.execute(sql):
                return True
        return False

    def addlinkref(self, urlFrom, urlTo, linkText):
        """Record a link edge and associate its anchor text with the link."""
        fromid = self.getentryid('urllist', 'url', urlFrom)
        toid = self.getentryid('urllist', 'url', urlTo)
        sql = "insert into link(fromid,toid) value(%d,%d)" % (fromid, toid)
        linkid = self.execute(sql, last=True)
        # NOTE(review): the whole anchor text is stored as a single wordlist
        # entry rather than split into words -- confirm this matches how
        # searcher.linktextscore looks words up.
        wordid = self.getentryid('wordlist', 'word', linkText)
        sql = "insert into linkwords value(%d,%d)" % (wordid, linkid)
        self.execute(sql)

    def crawl(self, pages, depth=2):
        """Breadth-first crawl starting from ``pages``, ``depth`` levels deep."""
        for _ in range(depth):
            newpages = set()
            for page in pages:
                try:
                    c = request.urlopen(page)
                # Narrowed from a bare except so Ctrl-C still interrupts.
                except Exception:
                    print('Could not open %s' % page)
                    continue
                soup = BeautifulSoup(c.read(), 'lxml')
                page = page.rstrip('/')
                self.addtoindex(page, soup)
                for link in soup('a'):
                    if 'href' not in dict(link.attrs):
                        continue
                    url = parse.urljoin(page, link['href'])
                    # Quotes would break the string-built SQL downstream.
                    if "'" in url:
                        continue
                    url = url.split('#')[0].rstrip('/')  # drop the fragment
                    if url[0:4] == 'http' and not self.isindexed(url):
                        newpages.add(url)
                    linkText = self.gettextonly(link)
                    self.addlinkref(page, url, linkText)
                # Persist each page's index entries as we go.
                self.commit()
            pages = newpages

    def createindextables(self):
        # Schema is created externally; kept for interface compatibility.
        pass

    def calculatepagerank(self, iterations=20):
        """Recompute the pagerank table from scratch.

        Standard iterative PageRank with damping factor 0.85; every page
        starts at 1.0 and ``iterations`` sweeps are run.
        """
        self.execute("drop table if exists pagerank")
        self.execute("create table pagerank(urlid int primary key,score float)")
        self.execute("insert into pagerank select id,1.0 from urllist")
        self.commit()

        for i in range(iterations):
            print("iteration %d" % i)
            for (urlid,) in self.execute("select id from urllist", True):
                pr = 0.15  # (1 - damping) base score
                for (linker,) in self.execute(
                        "select distinct fromid from link where toid=%d" % urlid, True):
                    linkingpr = self.execute(
                        "select score from pagerank where urlid=%d" % linker)[0]
                    linkingcount = self.execute(
                        "select count(*) from link where fromid=%d" % linker)[0]
                    pr += 0.85 * (linkingpr / linkingcount)
                self.execute(
                    "update pagerank set score=%f where urlid=%d" % (pr, urlid))
            self.commit()



# # crawler().crawl(['http://www.baidu.com'])
# # searcher().query('')
# # crawler().calculatepagerank()
# wWorld,wRiver,wBank=101,102,103
# uWorldBank,uRiver,uEarth=201,202,203
# mynet=nn.searchnet()
# # mynet.trainquery([wWorld,wBank],[uWorldBank,uRiver,uEarth],uWorldBank)
# # print(mynet.getresult([wWorld,wBank],[uWorldBank,uRiver,uEarth]))
# allurls=[uWorldBank,uRiver,uEarth]
# for i in range(30):
#     mynet.trainquery([wWorld,wBank],allurls,uWorldBank)
#     mynet.trainquery([wRiver,wBank],allurls,uRiver)
#     mynet.trainquery([wWorld],allurls,uEarth)
#
# print(mynet.getresult([wWorld,wBank],allurls))
# print(mynet.getresult([wRiver,wBank],allurls))
# print(mynet.getresult([wBank],allurls))

def test():
    """Interactive loop: read a query from stdin and print its results."""
    mysearch = searcher()
    while True:
        # Renamed from `str`, which shadowed the builtin of the same name.
        query_text = input("请输入查询：")
        mysearch.query(query_text)