"""
    由于作业3爬取网址无法成功，改爬取网易新闻要闻
"""

import json
import os
import re
import urllib.request as ur

import lxml.etree as le

from user_agent import get_user_agent_pc

# Module-level constants.
# JSONP endpoint for the NetEase top-stories list; {page} is '' for the
# first page or '_02', '_03', ... for later pages (see the fetch loop below).
URL = 'https://temp.163.com/special/00804KVA/cm_yaowen20200213{page}.js?callback=data_callback&date=20200115'
# Random desktop User-Agent string, chosen once per run by the project helper.
USER_AGENT = get_user_agent_pc()

def getResponse(url):
    """
    Download *url* with the shared User-Agent and session cookie.

    :param url: address of the page to fetch
    :return: raw response body as ``bytes``
    """
    req = ur.Request(
        url=url,
        headers={
            'User-Agent': USER_AGENT,
            'Cookie': '_ntes_nuid=4c13fd104bead9d3e3e763d53c3c9f2d; vjuids=6e88187ee.1656bdbd13b.0.af1d1abd36b82; vjlast=1535112893.1535112893.30; mail_psc_fingerprint=3b1b7723caed78fffd40a82d9c73a50e; nts_mail_user=18902573006@163.com:-1:1; mp_MA-9ADA-91BF1A6C9E06_hubble=%7B%22sessionReferrer%22%3A%20%22https%3A%2F%2Fcampus.163.com%2F%22%2C%22updatedTime%22%3A%201589087657934%2C%22sessionStartTime%22%3A%201589087657922%2C%22sendNumClass%22%3A%20%7B%22allNum%22%3A%201%2C%22errSendNum%22%3A%200%7D%2C%22deviceUdid%22%3A%20%22e8c3e6fa-a633-47bc-b5f8-da2e44c15b91%22%2C%22persistedTime%22%3A%201589087657917%2C%22LASTEVENT%22%3A%20%7B%22eventId%22%3A%20%22da_screen%22%2C%22time%22%3A%201589087657935%7D%2C%22sessionUuid%22%3A%20%226f5f80b2-d643-4da7-a73a-88246e3fe1dc%22%7D; P_INFO=w18902573006@163.com|1591865946|1|mail163|00&99|gud&1591864262&mail_client#gud&440800#10#0#0|189006&1||w18902573006@163.com; _ntes_nnid=4c13fd104bead9d3e3e763d53c3c9f2d,1607185846869; _ns=NS1.2.1152229924.1613582330; NTES_hp_textlink1=old; s_n_f_l_n3=1ed20304d79a9f8e1614878232014; ne_analysis_trace_id=1614878251698; vinfo_n_f_l_n3=1ed20304d79a9f8e.1.14.1535112892745.1614551550629.1614878469780'
        }
    )
    # Close the HTTP response deterministically; the original version
    # leaked the response object returned by urlopen().
    with ur.urlopen(req) as resp:
        return resp.read()


num = int(input('请输入要爬取新闻的数量：'))
# Each list page carries 70 headlines.  Round UP without over-fetching:
# the original `num // 70 + 1` requested one extra page whenever num was
# an exact multiple of 70.
page = (num + 69) // 70
# Collected records of the form {'title': ..., 'url': ...}.
news_list = []

# One iteration per 70-headline list page.
for pn in range(1, page + 1):
    # The first page has no numeric suffix; later pages use _02, _03, ...
    if pn == 1:
        url = URL.format(page='')
    else:
        url = URL.format(page='_%02d' % pn)
    # The list endpoint needs no special headers, so getResponse() is skipped.
    request = ur.Request(url=url)
    # The feed is GBK-encoded JSONP of the form data_callback([...]).
    response = ur.urlopen(request).read().decode('GBK')
    # Unwrap the JSONP by slicing between the OUTERMOST parentheses.
    # The original deleted every '(' and ')' in the payload, which corrupts
    # the JSON whenever a headline itself contains parentheses.
    start = response.find('(')
    end = response.rfind(')')
    ret = json.loads(response[start + 1:end])
    # Keep only the fields needed downstream: title and article URL.
    for data in ret:
        news_list.append({
            'title': data['title'],
            'url': data['docurl'],
        })

# Count of failed downloads.
error = 0
# Only the first `num` headlines were requested; the feed may have
# returned fewer, so size the progress display from the real target list
# (the original printed totals against `num`, which made the final
# success count wrong when fewer than `num` items were collected).
targets = news_list[:num]
total = len(targets)
# Make sure the output directory exists so the very first save works.
os.makedirs('news', exist_ok=True)

for i, news in enumerate(targets, start=1):
    try:
        # Fetch the article page with the shared headers.
        response_news = getResponse(
            url=news['url'],
        )
        # Extract the headline from the article page via XPath.
        title = le.HTML(response_news).xpath('//h1[@class="post_title"]/text()')[0]
        # Pages are served in mixed encodings: try GBK first, fall back to
        # UTF-8 when GBK decoding fails.
        try:
            title = str(title).encode('ISO-8859-1').decode('gbk')
        except UnicodeDecodeError:
            title = str(title).encode('ISO-8859-1').decode('utf-8')
        # Strip characters that are illegal in file names.
        title = re.sub(
            r'[/\\:*"<>|?]', '', title
        )
        # Save the raw page under news/, named after the headline.
        filepath = 'news/%s.html' % title
        with open(filepath, 'wb') as f:
            f.write(response_news)
        # Show progress.
        print('{:>4d}/{}\t'.format(i, total) + '\t新闻题目：' + title)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still abort.
        error += 1
        print('{:>4d}/{}\t'.format(i, total) + '\t保存失败')

print('操作完成，下载成功 %d，下载失败 %d' % (total - error, error))