import requests, re, os, time
from lxml import etree
import pandas as pd

# Fetch the admissions/employment landing page and collect the category
# links and names from the left-hand sidebar navigation.
url = 'http://mse.cufe.edu.cn/zsjy.htm'
# timeout: requests has no default timeout, so a stalled connection would hang forever
res = requests.get(url, timeout=10)
# The site serves UTF-8
res.encoding = 'utf8'
xml = etree.HTML(res.text)
# Links and display names of the three categories in the left sidebar;
# hrefs on the page are relative, so prefix the site root.
href_ls = ['http://mse.cufe.edu.cn/'+href for href in xml.xpath('//div[@class="list_nr"]/ul/li/a/@href')]
classify = xml.xpath('//div[@class="list_nr"]/ul/li/a/text()')
ls = []  # one dict per article: title, body text, visit count


def _fetch_with_retry(src, title, attempts):
    """GET *src*, retrying up to *attempts* times on a request error.

    Returns the decoded HTML text on success, or the sentinel string
    '失败' after every attempt has failed.

    Fixes the original recursive version, whose `except` branch did not
    `return` the recursive call — after any transient failure it returned
    None even when the retry succeeded, crashing etree.HTML(None) later.
    """
    for _ in range(attempts):
        try:
            res = requests.get(src, timeout=10)
            res.encoding = 'utf8'
            return res.text
        # Narrowed from a bare `except:` so Ctrl-C etc. still propagate
        except requests.RequestException:
            print(title, '的请求失败，将再次发出请求')
    print(title, '已达请求次数上限，稍后重爬或手动调高请求次数尝试')
    return '失败'


for href, clas in zip(href_ls, classify):
    response = requests.get(href, timeout=10)
    response.encoding = 'utf8'
    xml_detail = etree.HTML(response.text)
    # Parent node of each article entry on the category page; parse one by one
    xml_parent = xml_detail.xpath('//div[@class="kxyj_nr"]/div/ul/li')
    for each in xml_parent:
        dic = {}
        raw_href = each.xpath('./a/@href')[0]
        # Article URL: relative links begin with '..', replace with the site root
        src = re.sub(r'\.\.', 'http://mse.cufe.edu.cn', raw_href)
        # Article title: strip whitespace/newline artifacts from the anchor text
        title = re.sub(r'\s|\\r\\n\\r\\n', '', ''.join(each.xpath('.//text()')))
        # Numeric article id, needed to request the visit counter
        src_id = re.findall(r'\d+', raw_href)
        # The visit count is served by this endpoint; it is absent from the page source
        visit_url = f'http://mse.cufe.edu.cn/system/resource/code/news/click/dynclicks.jsp?clickid={src_id[-1]}&owner=1047611702'
        visit = requests.get(visit_url, timeout=10).text
        text = _fetch_with_retry(src, title, 3)  # 3 = max request attempts
        if text == '失败':
            continue
        xm = etree.HTML(text)
        # Article body text
        t = ''.join(xm.xpath('//div[@class="xuexiao_nr"]/div/p//text()'))
        dic['标题'] = title
        dic['内容'] = t
        dic['访问量'] = visit
        ls.append(dic)
        time.sleep(1)  # be polite to the server between article requests
        print('已爬取', clas, title)
pd.DataFrame(ls).to_excel('信息.xlsx',index=False)