import urllib3
from bs4 import BeautifulSoup
import re
import random
import time
import xlwt
# Browser-mimicking request headers for the paginated listing endpoint.
# NOTE(review): this dict is never passed to any request in this file —
# the PoolManager below is built with requesHeader1 only.  Confirm it is
# still needed (e.g. by code elsewhere) before deleting.
requesHeader = {
    'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cache-Control': 'max-age=0',
    'Host': 'www.guidelines-registry.org',
    'Proxy-Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1',
    'Referer': 'http://www.guidelines-registry.org/guid?lang=zh_CN&page=2&limit=10',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
}
# Browser-mimicking headers attached to every request sent through the
# PoolManager below.
# NOTE(review): the SESSION cookie is hard-coded from a captured browser
# session and will eventually expire — refresh it if requests start
# returning login/redirect pages.
requesHeader1 = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Cookie': 'SESSION=NzBkZjlhMmYtYjEyNS00NDUyLTg1ZDktOTlkZDYxYTYyMTVi',
    'Host': 'www.guidelines-registry.org',
    'Referer': 'http://www.guidelines-registry.org/guid?lang=zh_CN',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
}

"""
先分析网页结构，这个就是个静态网页，数据都在html文件里；
按理说总页数也应该要爬的，我偷懒直接total_page=33了；
这个爬虫的流程就是先获取每一个论文详细信息对应的页码，存在pageNum_list里面；
再拼接处完整的链接请求详细数据；
"""

pageNum_list = []
total_page = 33
http = urllib3.PoolManager(num_pools=10, headers=requesHeader1)
flag = 0    #此flag用于判断是否第一次写入excel，第一次要在第一行写标签数据；
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('Sheet1')

    # The harvesting pass below walks all 33 listing pages and pulls each
# paper's detail-page id out of the result table.  It is kept commented
# out: the ids it produced are inlined further down, so a rerun cannot be
# cut short by the site rate-limiting us halfway through.
# for page in range(total_page):
#     page += 1
#     url_page = 'http://www.guidelines-registry.org/guid?lang=zh_CN&page=' + str(page) + '&limit=10#table'
#     resp1 = http.request('GET', url_page)
#     soup = BeautifulSoup(resp1.data, "html.parser")
#     pageNum_soup = soup.find_all("table", class_='layui-table')
#     re_func = re.compile(r'.*/guid/([0-9]+)')
#     print(page)
#     for pageNum in pageNum_soup:
#         a = pageNum.find_all('a')
#         for i in a:
#             re1 = re_func.match(str(i))
#             pageNum_list.append(re1.group(1))
#     time.sleep(random.random()+1)
# print(pageNum_list)

# Ids 906 down to 563 as strings, minus the gaps the site itself had at
# harvest time: 887, the whole 868-880 stretch, and 741 were absent from
# the listing.
_absent_ids = set(range(868, 881)) | {887, 741}
pageNum_list = [str(num) for num in range(906, 562, -1) if num not in _absent_ids]
count = 0

# Detail-page scraping: fetch each paper's page, parse the right-hand
# info panel and the attribute tables with BeautifulSoup, and write one
# spreadsheet row per paper.  The column order is frozen from the first
# paper's field set so every later row stays aligned with the header
# (the original wrote raw dict order, which misaligned columns whenever
# a paper reported a different set of fields).
column_keys = None  # header field names, fixed once the first paper is parsed
for paper_num in pageNum_list:
    url2 = 'http://www.guidelines-registry.org/guid/' + str(paper_num) + '?lang=zh_CN'
    resp2 = http.request('GET', url2)
    soup2 = BeautifulSoup(resp2.data, "html.parser")
    table2 = soup2.find_all("table", class_="layui-table bj")
    div2 = soup2.find("div", class_="right_info2")
    tds_dic = {'paper_id': str(paper_num)}
    # "field：value" pairs from the info panel.  split('：', 1) keeps
    # values that themselves contain a full-width colon intact (the old
    # split('：') dropped everything after a second colon), and
    # paragraphs without a colon are skipped instead of raising
    # IndexError on parts[1].
    if div2 is not None:  # guard: some pages may lack the info panel
        for p in div2.find_all('p'):
            parts = p.text.strip().split('：', 1)
            if len(parts) == 2:
                tds_dic[parts[0]] = parts[1]
    # Key/value rows from the attribute tables: first <td> is the field
    # name, second (when present) the value.
    for body in table2:
        for row in body.find_all('tr'):
            tds = row.find_all('td')
            if not tds:  # e.g. header rows built from <th> only
                continue
            key = tds[0].text.strip()
            tds_dic[key] = tds[1].text.strip() if len(tds) > 1 else ''
    if column_keys is None:
        # First paper seen: freeze the column order and emit the header row.
        column_keys = list(tds_dic.keys())
        for col, key in enumerate(column_keys):
            sheet.write(0, col, key)
    # Write the data row in header order; fields the first paper did not
    # have are dropped rather than shifting every later column.
    for col, key in enumerate(column_keys):
        sheet.write(count + 1, col, tds_dic.get(key, ''))
    count += 1
    if count % 10 == 0:  # throttle: pause a few seconds every 10 papers
        time.sleep(random.random() + 5)
    print(paper_num)
workbook.save("PaperDetail.xls")

