# -*- coding: UTF-8 -*-
from bs4 import BeautifulSoup
from download import request ## local helper module for HTTP requests (module name was changed)

class ieeetgars():

    def __init__(self):
        from pymongo import MongoClient
        client = MongoClient() ##与MongDB建立连接（这是默认连接本地MongDB数据库）
        db = client['tgars'] ## 选择一个数据库
        self.tagrsabs_collection= db['tagrsabstract'] ##在meizixiezhenji这个数据库中，选择一个集合
        self.titles = [] ##用来保存文献标题
        self.abstracturls = [] ##初始化一个 列表 用来保存摘要的地址
    def saveabstract(self,absctract,title):
        import os
        isExists = os.path.exists('F:\Tgrs_abstract')
        if not isExists:
            print(u'建了一个名字叫做','Tgrs_abstract', u'的文件夹！')
            os.makedirs('F:\Tgrs_abstract')
        else:
            print(u'名字叫做','Tgrs_abstract', u'的文件夹已经存在了！')
        if absctract:
            try:
                print u'开始保存摘要：\n'
                name=title+'.txt'
                f=open(name,'w')
                f.write(absctract)
                f.close()
            except Exception, e:
                print repr(e)
    def getpaperid(self,url):

        start_html = request.get(url, 3)
        print start_html
        Soup = BeautifulSoup(start_html.text, 'lxml')

        # find filter rule of changing page
        absurl = Soup.find('a', class_="art-abs-url")
        filter = absurl['data-qrystr']
        print u'滤波规则：',filter
        # find the most page num
        pagenum = Soup.find('div', class_="pagination").find_all('a')[-2].get_text()
        print pagenum,u'数据类型：',type(pagenum)
        # 遍历所有页下载摘要
        for pagenumber in range(1, int(pagenum) + 1):
            nextpageurl = url + '&' + filter + '&pageNumber=' + str(pagenumber)
            print u'下一页的网址',nextpageurl
            self.absdown(nextpageurl)

    def absdown(self,url):
        import re
        import datetime
        paperurl = 'http://ieeexplore.ieee.org/document/{}/'
        html = request.get(url, 3)
        Soup = BeautifulSoup(html.text, 'lxml')
        #find the url of all paper in the current page
        all_a = Soup.find_all('a', class_="art-abs-url")
        # download all papers abstract
        for a in all_a:
            paperid = a['data-arnumber']
            abstracturl = paperurl.format(paperid)  # 文章的下载地址
            if self.tagrsabs_collection.find_one({'文献地址':abstracturl}):
                print u'这个摘要已经爬取过了\n'
            else:
                html = request.get(abstracturl,3)
                paper_Soup = BeautifulSoup(html.text, "html5lib")
                #正则表达式匹配摘要
                print u'开始匹配摘要：\n'
                soup_pattern=re.compile(r'\b(abstract).*doi\b')
                abstract=re.search(soup_pattern,paper_Soup.text)
                print 'abstract is ',abstract.group()
                title = paper_Soup.find('title').get_text()
                print title

                self.saveabstract(abstract.group(), title)
                self.titles.append(title)
                self.abstracturls.append(abstracturl)
                post = {  ##这是构造一个字典，里面有啥都是中文，很好理解吧！
                    '文献标题': self.titles,
                    '文献地址': self.abstracturls,
                    '获取时间': datetime.datetime.now()
                }
                print u'存入数据库：',post
                self.tagrsabs_collection.save(post)  ##将post中的内容写入数据库。
                print(u'插入数据库成功')

# URL of the journal issue to scrape (TGRS, punumber=36)
url = 'http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=36'

# Guarded so importing this module does not trigger network scraping;
# the scraper only runs when the file is executed as a script.
if __name__ == '__main__':
    tgars = ieeetgars()
    tgars.getpaperid(url)



    # f = open(name + '.txt', 'ab')
    # f.write(abstract)
    # f.close()
