from bs4 import BeautifulSoup
import urllib.parse
import requests
import pymongo
import urllib.parse

'''
Scraper for Aliyun Research Institute articles on rural ("农村") topics.

It first walks the paginated search-result pages and collects every
article URL into a list, then loops over those URLs, extracts each
article's title, body and metadata fields, and stores one record per
article in MongoDB.
'''

def getMarks(str, remarks=None):
    """Extract the value that follows the field label *str* in the remarks.

    Each remark's text is expected to contain lines of the form
    '<label>：<value>' (e.g. '来源：新华社'). Returns the text between the
    label (plus the one-character '：' separator) and the next newline,
    or None when no remark contains the label.

    Args:
        str: field label to look for, e.g. '来源'. (Name shadows the
            builtin; kept for backward compatibility with callers.)
        remarks: iterable of elements exposing ``.get_text()``; defaults
            to the module-level ``remark`` populated by the scrape loop.

    Returns:
        The extracted value string, or None if the label is not found.
    """
    if remarks is None:
        remarks = remark  # module-level global set by the scraping loop
    for item in remarks:
        text = item.get_text()
        left = text.find(str)
        if left == -1:
            # Original sliced even on a miss (find() == -1), producing
            # garbage; skip to the next remark instead.
            continue
        right = text.find('\n', left)
        if right == -1:
            # No trailing newline: take the rest of the string instead of
            # silently dropping the final character via text[...:-1].
            right = len(text)
        # Skip the label itself plus the one-character '：' separator
        # (generalizes the original hard-coded `left + 3`).
        return text[left + len(str) + 1:right]
    return None
# MongoDB handle: database 'aliyun', collection 'nongcun'.
connection = pymongo.MongoClient()
tdb = connection.aliyun
post = tdb.nongcun

url_list = []  # article URLs gathered from the search-result pages
n = 0          # running count of stored articles, printed as progress

# Walk search-result pages 1..13 for the keyword '农村' and collect every
# article link before scraping the articles themselves.
search_url_template = ('http://www.aliresearch.com/blog/index/search/keywords/'
                       + urllib.parse.quote('农村') + '/page/{}.html')
for page_no in range(1, 14):
    listing_page = requests.get(search_url_template.format(page_no))
    listing_soup = BeautifulSoup(listing_page.text, 'lxml')
    for anchor in listing_soup.select('h3.text-more.media-heading > a'):
        url_list.append('http://www.aliresearch.com/' + anchor['href'])

# Visit each collected article URL, extract title/body plus the metadata
# fields, and store one MongoDB document per article.
for url in url_list:
    article_page = requests.get(url)
    soup = BeautifulSoup(article_page.text, 'lxml')
    titles = soup.select('div.common_block_title.clearfix > h2')
    wens = soup.select('section[id="contents"]')
    # getMarks() reads this module-level variable.
    remark = soup.select(' span.pull-left > span')
    src = getMarks('来源')
    article_type = getMarks('分类')  # renamed: was shadowing builtin `type`
    pub_time = getMarks('时间')      # renamed: was shadowing a common module name
    read_num = getMarks('阅读')

    # The original built `data` inside the loop but inserted it only once
    # afterwards, which raised NameError when a page had no matches and
    # kept only the last pair otherwise; insert one document per
    # (title, body) pair instead.
    for title, wen in zip(titles, wens):
        data = {
            'title': title.get_text(),
            'wen': wen.get_text(),
            'src': src,
            'type': article_type,
            'time': pub_time,
            'readNum': read_num,
        }
        # Collection.insert() is deprecated in pymongo 3+; use insert_one().
        post.insert_one(data)
        n += 1
        print(n)
