#!/usr/bin/env python
# coding: utf-8
import requests
import time
import fake_useragent as fu    # randomized browser User-Agent strings
ua = fu.UserAgent()
from bs4 import BeautifulSoup
import os
import re
# Scrape the NIFD (National Institution for Finance & Development) research
# results page: archive each article's HTML locally, then download every
# attached PDF and write a dated index file of the PDF titles/links.
os.chdir('D:/Document/书籍/NIFD国家金融与发展实验室')

# Open the research-results listing page.
time.sleep(3)  # polite delay before the first request
url_home = 'http://www.nifd.cn'
url_find = 'http://www.nifd.cn/Search/Index?category=ResearchResult&title='
header = {'User-Agent': ua.random}  # random UA picked once per run
response = requests.get(url_find, headers=header)
soup_find = BeautifulSoup(response.text, 'lxml')
name = soup_find.select('#search > ul > li > a')

# Collect each article's title and absolute link.
# (Renamed from `list`, which shadowed the builtin.)
articles = []
for anchor in name:
    articles.append({
        'title': anchor.get_text(),
        'link': url_home + anchor.get('href'),  # hrefs are site-relative
    })

# Visit each article page, save its HTML, and record its PDF attachment.
list_pdf = []
for article in articles:
    time.sleep(5)    # crawl delay between article requests
    try:
        temp_res = requests.get(article['link'], headers=header)
        temp_soup = BeautifulSoup(temp_res.text, 'lxml')
        temp_select = temp_soup.select('body > div.container.pager-container.por-container > div > div.col-lg-4.col-md-4.col-sm-4.col-xs-12 > div > div.annex-box > a')
        temp_article = temp_soup.select('body > div.container.pager-container.por-container > div > div.col-lg-8.col-md-8.col-sm-8.col-xs-12')
        temp_txt = temp_article[0].get_text()
        # First text line of the article body becomes the HTML file name.
        temp_name = re.match(r'\n.*\n', temp_txt).group()
        temp_name = temp_name[1:len(temp_name) - 1] + '.html'
        with open(temp_name, 'w+', encoding='utf-8') as temp_file:
            temp_file.write(temp_res.text)
        list_pdf.append({
            'title': temp_select[0].get_text(),
            'link': url_home + temp_select[0].get('href'),
        })
    except (requests.RequestException, IndexError, AttributeError) as err:
        # Best-effort crawl: skip pages that fail to download or parse,
        # but report the skip instead of silently swallowing everything
        # (the old bare `except: pass` also ate KeyboardInterrupt).
        print('skipped', article['link'], '-', err)

# Write a dated index of the PDF links, downloading each PDF as we go.
txt_time = str(time.localtime().tm_year) + '-' + str(time.localtime().tm_mon) + '-' + str(time.localtime().tm_mday)
txt_name = txt_time + '  NIFD国家金融与发展实验室.txt'
# encoding='utf-8' so Chinese titles cannot crash on a non-UTF-8 locale.
with open(txt_name, mode='w+', encoding='utf-8') as file:
    for entry in list_pdf:
        file.write(entry['title'])
        file.write(entry['link'])
        file.write('\n')  # one entry per line; the old file had no separator
        # NOTE(review): [2:-1] strips the title's first two and last chars —
        # presumably stray whitespace/markers in the scraped text; verify.
        temp_pdf_name = entry['title'][2:-1]
        pdf_res = requests.get(entry['link'], headers=header)
        with open(temp_pdf_name, 'wb') as f:
            f.write(pdf_res.content)
# experiment / scratch notes
        
        

